max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---|
Calibration/HcalAlCaRecoProducers/test/AlCaHBHEMuonProducer_cfg.py | ckamtsikis/cmssw | 852 | 12693727 | import FWCore.ParameterSet.Config as cms
process = cms.Process("AlCaHBHEMuon")
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag=autoCond['run2_data']
process.load("Calibration.HcalAlCaRecoProducers.alcahbhemuon_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:/afs/cern.ch/user/a/amkalsi/public/RecoFileForAlcaProducer.root'
# 'root://xrootd.unl.edu//store/mc/Phys14DR/DYToMuMu_M-50_Tune4C_13TeV-pythia8/GEN-SIM-RECO/PU20bx25_tsg_castor_PHYS14_25_V1-v1/10000/184C1AC9-A775-E411-9196-002590200824.root'
)
)
process.load("Calibration.HcalAlCaRecoProducers.ALCARECOHcalHBHEMuon_Output_cff")
process.muonOutput = cms.OutputModule("PoolOutputModule",
outputCommands = process.OutALCARECOHcalHBHEMuon.outputCommands,
fileName = cms.untracked.string('PoolOutput.root'),
)
process.p = cms.Path(process.HBHEMuonProd)
process.e = cms.EndPath(process.muonOutput)
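# Editor note: a configuration like this is normally executed inside a CMSSW environment
# with "cmsRun AlCaHBHEMuonProducer_cfg.py"; the Path runs HBHEMuonProd and the EndPath
# writes the products selected by OutALCARECOHcalHBHEMuon to PoolOutput.root.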
|
plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/scenarios_repo.py | busunkim96/dbnd | 224 | 12693758 |
import datetime
import logging
from dbnd._core.utils.project.project_fs import abs_join, relative_path
from dbnd_test_scenarios.utils.data_chaos_monkey.client_scoring_chaos import (
is_chaos_column_10,
)
from targets import target
logger = logging.getLogger(__name__)
_PLUGIN_ROOT = relative_path(__file__, "..", "..")
_PLUGIN_SRC_ROOT = relative_path(__file__)
def scenario_root_path(*path):
return abs_join(_PLUGIN_ROOT, *path)
def scenario_src_path(*path):
return abs_join(_PLUGIN_SRC_ROOT, *path)
def test_scenario_path(*path):
return scenario_root_path("scenarios", *path)
def test_scenario_target(*path):
return target(test_scenario_path(*path))
def scenario_data_path(*path):
return scenario_root_path("data", *path)
def scenario_data_target(*path):
return target(scenario_data_path(*path))
def scenario_pyspark_path(*path):
return scenario_src_path("spark", "pyspark_scripts", *path)
class _Scenarios(object):
pass
class _ScenariosClientScoringData(object):
p_g_ingest_data = scenario_data_target("client_scoring/p_g_ready_for_ingest.csv")
p_g_ingest_data__no_col_10 = scenario_data_target(
"client_scoring/p_g_ready_for_ingest__no_col_10.csv"
)
p_g_train_data = scenario_data_target("client_scoring/p_g_ready_for_train.csv")
p_a_master_data_bad = scenario_data_target("client_scoring/p_a_master_data_bad.csv")
partners = ["autolab", "picsdata", "myp"]
partners_big = ["autobig", "picsbig"]
def get_ingest_data(self, partner, target_date_str):
target_date = datetime.datetime.strptime(target_date_str, "%Y-%m-%d").date()
if is_chaos_column_10(partner, target_date):
return self.p_g_ingest_data__no_col_10
return self.p_g_ingest_data
scenarios = _Scenarios()
client_scoring_data = _ScenariosClientScoringData()
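# Illustrative usage (editor sketch, not part of the original module):
#   client_scoring_data.get_ingest_data("autolab", "2021-06-01")
# returns the "no column 10" CSV target when the data chaos monkey drops column 10 for
# that partner/date, and the regular ingest CSV otherwise.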
|
models/slimmable/__init__.py | haolibai/dmcp | 119 | 12693769 | # -*- coding:utf-8 -*-
from models.slimmable.us_resnet import us_resnet18, us_resnet50
from models.slimmable.us_mobilenet import us_mobilenet_v2
|
tests/formatter/to_python_test.py | terencehonles/bravado-core | 122 | 12693780 | # -*- coding: utf-8 -*-
from datetime import date
from datetime import datetime
import six
from mock import patch
from bravado_core.formatter import SwaggerFormat
from bravado_core.formatter import to_python
from bravado_core.spec import Spec
if not six.PY2:
long = int
def test_none(minimal_swagger_spec):
string_spec = {'type': 'string', 'format': 'date'}
assert to_python(minimal_swagger_spec, string_spec, None) is None
def test_no_format_returns_value(minimal_swagger_spec):
string_spec = {'type': 'string'}
assert 'boo' == to_python(minimal_swagger_spec, string_spec, 'boo')
def test_date(minimal_swagger_spec):
string_spec = {'type': 'string', 'format': 'date'}
assert date(2015, 4, 1) == to_python(
minimal_swagger_spec, string_spec, '2015-04-01',
)
def test_datetime(minimal_swagger_spec):
string_spec = {'type': 'string', 'format': 'date-time'}
result = to_python(
minimal_swagger_spec, string_spec, '2015-03-22T13:19:54',
)
assert datetime(2015, 3, 22, 13, 19, 54) == result
@patch('bravado_core.spec.warnings.warn')
def test_no_registered_format_returns_value_as_is_and_issues_warning(mock_warn, minimal_swagger_spec):
string_spec = {'type': 'string', 'format': 'bar'}
assert 'baz' == to_python(minimal_swagger_spec, string_spec, 'baz')
assert mock_warn.call_count == 1
def test_int64_long(minimal_swagger_spec):
integer_spec = {'type': 'integer', 'format': 'int64'}
result = to_python(minimal_swagger_spec, integer_spec, long(999))
assert long(999) == result
def test_int64_int(minimal_swagger_spec):
integer_spec = {'type': 'integer', 'format': 'int64'}
result = to_python(minimal_swagger_spec, integer_spec, 999)
assert long(999) == result
assert isinstance(result, long)
def test_int32_long(minimal_swagger_spec):
integer_spec = {'type': 'integer', 'format': 'int32'}
result = to_python(minimal_swagger_spec, integer_spec, long(999))
assert 999 == result
assert isinstance(result, int)
def test_int32_int(minimal_swagger_spec):
integer_spec = {'type': 'integer', 'format': 'int32'}
result = to_python(minimal_swagger_spec, integer_spec, 999)
assert 999 == result
assert isinstance(result, int)
def test_float(minimal_swagger_spec):
float_spec = {'type': 'number', 'format': 'float'}
result = to_python(minimal_swagger_spec, float_spec, float(3.14))
assert 3.14 == result
assert isinstance(result, float)
def test_double(minimal_swagger_spec):
double_spec = {'type': 'number', 'format': 'double'}
result = to_python(minimal_swagger_spec, double_spec, float(3.14))
assert 3.14 == result
assert isinstance(result, float)
def test_byte(minimal_swagger_spec):
byte_spec = {'type': 'string', 'format': 'byte'}
result = to_python(minimal_swagger_spec, byte_spec, 'x')
assert 'x' == result
assert isinstance(result, str)
def test_byte_base64(minimal_swagger_dict):
swagger_spec = Spec.from_dict(
minimal_swagger_dict, config={'use_base64_for_byte_format': True},
)
schema = {'type': 'string', 'format': 'byte'}
result = to_python(swagger_spec, schema, 'YWJj/w==')
assert b'abc\xff' == result
assert isinstance(result, bytes)
def test_ref(minimal_swagger_dict):
minimal_swagger_dict['definitions']['Int32'] = {
'type': 'integer', 'format': 'int32',
}
int_ref_spec = {'$ref': '#/definitions/Int32'}
swagger_spec = Spec.from_dict(minimal_swagger_dict)
result = to_python(swagger_spec, int_ref_spec, 999)
assert 999 == result
assert isinstance(result, int)
def test_override(minimal_swagger_dict):
class Byte(object):
def __init__(self, x):
self.x = x
def __str__(self):
return str(self.x)
def __repr__(self):
return '%s(%r)' % (self.__class__, self.x)
byteformat = SwaggerFormat(
format='byte',
to_wire=lambda x: str(x),
to_python=lambda x: Byte(x),
validate=lambda x: isinstance(x, str),
description=None,
)
number_spec = {'type': 'string', 'format': 'byte'}
swagger_spec = Spec.from_dict(minimal_swagger_dict, config={'formats': [byteformat]})
result = to_python(swagger_spec, number_spec, '8bits')
assert '8bits' == str(result)
assert repr(Byte('8bits')) == repr(result)
assert type(result) is Byte
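# Editor note: the test above exercises the user-defined format hook; registering a
# SwaggerFormat for "byte" makes to_python() hand back the custom Byte wrapper instead of
# applying the built-in byte handling.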
|
deployment/docker flask fit predict/train_model.py | Diyago/ML-DL-scripts | 142 | 12693820 |
import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA
np.random.seed(0)
# import some data to play with
iris_X, iris_y = datasets.load_iris(return_X_y=True)
indices = np.random.permutation(len(iris_X))
iris_X_train = iris_X[indices[:-10]]
iris_y_train = iris_y[indices[:-10]]
# Create and fit a nearest-neighbor classifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(iris_X_train, iris_y_train)
try:
    import joblib
except ImportError:
    # scikit-learn < 0.23 shipped a vendored copy of joblib
    from sklearn.externals import joblib
joblib.dump(knn, "knn.pkl")
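# Editor sketch: the persisted model can later be reloaded for serving, e.g. in the
# accompanying Flask predict endpoint, along the lines of:
#   knn = joblib.load("knn.pkl")
#   knn.predict(iris_X[indices[-10:]])  # the held-out rows from above, illustrative only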
|
saspy/SASLogLexer.py | metllord/saspy | 317 | 12693829 |
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pygments.lexer import RegexLexer
import pygments.token
from pygments.style import Style
class SASLogStyle(Style):
default_style = ""
styles = {
pygments.token.Comment: '#0000FF',
pygments.token.Keyword: 'bold #ff0000',
pygments.token.Name: '#008000',
pygments.token.String: '#111'
}
class SASLogLexer(RegexLexer):
__all__ = ['SASLogLexer']
name = 'Lexer to Color SAS Logs equivalent to DMS'
tokens = {
'root': [
(r'^\d+.*((\n|\t|\n\t)[ ]([^WEN].*)(.*))*', pygments.token.String),
(r'^NOTE.*((\n|\t|\n\t)[ ]([^WEN].*)(.*))*', pygments.token.Comment.Multiline, 'note'),
(r'^ERROR.*((\n|\t|\n\t)[ ]([^WEN].*)(.*))*', pygments.token.Keyword.Multiline, 'error'),
(r'^WARNING.*((\n|\t|\n\t)[ ]([^WEN].*)(.*))*', pygments.token.Name.Multiline, 'warning'),
(r'\s', pygments.token.Text)
],
'error': [
(r'^\s+.*$', pygments.token.Keyword.Multiline),
(r'^\S+.*$', pygments.token.Keyword.Multiline, '#pop')
],
'note': [
(r'^\s+.*$', pygments.token.Comment.Multiline),
(r'^\S+.*$', pygments.token.Comment.Multiline, '#pop')
],
'warning': [
(r'^\s+.*$', pygments.token.Name.Multiline),
(r'^\S+.*$', pygments.token.Name.Multiline, '#pop')
]
}
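# Editor sketch (not part of the original module): one way to apply the lexer/style pair
# with pygments:
#   from pygments import highlight
#   from pygments.formatters import HTMLFormatter
#   html = highlight(log_text, SASLogLexer(), HTMLFormatter(style=SASLogStyle, noclasses=True))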
|
setup.py | xlotlu/weresync | 206 | 12693833 | #! /usr/bin/env python3
import os
from setuptools import setup, find_packages
import subprocess
import shutil
class InvalidSetupError(Exception):
pass
def create_mo_files():
"""Converts .po templates to readble .mo files using msgfmt."""
# Avoids this code running on read the docs, since gettext is not installed
# there
if os.environ.get("READTHEDOCS") == "True":
return []
if shutil.which("msgfmt") is None:
        # msgfmt is part of gettext; without it the translation catalogs cannot be built
raise InvalidSetupError("gettext not installed but is required.")
localedir = 'src/weresync/resources/locale'
po_dirs = []
langs = next(os.walk(localedir))[1]
po_dirs = [localedir + '/' + l + '/LC_MESSAGES/' for l in langs]
for d in po_dirs:
po_files = [
f for f in next(os.walk(d))[2] if os.path.splitext(f)[1] == '.po'
]
for po_file in po_files:
filename, extension = os.path.splitext(po_file)
mo_file = filename + '.mo'
msgfmt_cmd = 'msgfmt {} -o {}'.format(d + po_file, d + mo_file)
subprocess.call(msgfmt_cmd, shell=True)
return ["locale/" + l + "/LC_MESSAGES/*.mo" for l in langs]
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as file:
return file.read()
target_icon_loc = "share/icons/hicolor/scalable/apps"
if os.getuid() == 0: # Install is running as root
target_icon_loc = "/usr/" + target_icon_loc
if __name__ == "__main__":
setup(
name="WereSync",
version="1.1.5",
package_dir={"": "src"},
packages=find_packages("src"),
install_requires=["parse==1.6.6", "yapsy==1.11.223", "pydbus==0.6.0"],
entry_points={
'console_scripts': [
"weresync = weresync.interface.cli:main",
"weresync-daemon = weresync.daemon.daemon:run"
],
'gui_scripts': ["weresync-gui = weresync.interface.gui:start_gui"]
},
package_data={
"weresync.resources": ["*.svg", "*.png", "weresync*.*"] + create_mo_files()
},
data_files=[(target_icon_loc,
["src/weresync/resources/weresync.svg"])],
# Metadata
author="<NAME>",
author_email="<EMAIL>",
description="Incrementally clones Linux drives",
long_description=read("README.rst"),
license="Apache 2.0",
keywords="clone, linux, backup, smaller drive",
url="https://github.com/DonyorM/weresync",
)
|
test/test_posetrack.py | collector-m/UniTrack | 240 | 12693858 | import os
import pdb
import os.path as osp
import sys
sys.path[0] = os.getcwd()
import cv2
import copy
import json
import yaml
import logging
import argparse
from tqdm import tqdm
from itertools import groupby
import pycocotools.mask as mask_utils
import numpy as np
import torch
from torchvision.transforms import transforms as T
from utils.log import logger
from utils.meter import Timer
from utils.mask import pts2array
import data.video as videodataset
from utils import visualize as vis
from utils.io import mkdir_if_missing
from core.association import matching
from tracker.mot.pose import PoseAssociationTracker
def identical(a, b):
if len(a) == len(b):
arra = pts2array(a)
arrb = pts2array(b)
if np.abs(arra-arrb).sum() < 1e-2:
return True
return False
def fuse_result(res, jpath):
with open(jpath, 'r') as f:
obsj = json.load(f)
obsj_fused = copy.deepcopy(obsj)
for t, inpj in enumerate(obsj['annolist']):
skltns, ids = res[t][2], res[t][3]
nobj_ori = len(obsj['annolist'][t]['annorect'])
for i in range(nobj_ori):
obsj_fused['annolist'][t]['annorect'][i]['track_id'] = [1000]
for j, skltn in enumerate(skltns):
match = identical(obsj['annolist'][t]['annorect'][i]['annopoints'][0]['point'], skltn)
if match:
obsj_fused['annolist'][t]['annorect'][i]['track_id'] = [ids[j],]
return obsj_fused
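# Editor note: fuse_result matches each tracked skeleton back to the original PoseTrack
# annotation by comparing keypoints (see identical() above) and writes the assigned
# track_id into a copy of the annotation JSON; unmatched annotations keep the dummy id 1000.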
def eval_seq(opt, dataloader, save_dir=None):
if save_dir:
mkdir_if_missing(save_dir)
tracker = PoseAssociationTracker(opt)
timer = Timer()
results = []
for frame_id, (img, obs, img0, _) in enumerate(dataloader):
# run tracking
timer.tic()
online_targets = tracker.update(img, img0, obs)
online_tlwhs = []
online_ids = []
online_poses = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_poses.append(t.pose)
timer.toc()
# save results
results.append((frame_id + 1, online_tlwhs, online_poses, online_ids))
if save_dir is not None:
online_im = vis.plot_tracking(img0, online_tlwhs,
online_ids, frame_id=frame_id, fps=1. / timer.average_time)
if save_dir is not None:
cv2.imwrite(os.path.join(
save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
return results, timer.average_time, timer.calls
def main(opt):
logger.setLevel(logging.INFO)
result_root = opt.out_root
result_json_root = osp.join(result_root, 'json')
mkdir_if_missing(result_json_root)
transforms= T.Compose([T.ToTensor(), T.Normalize(opt.im_mean, opt.im_std)])
obs_root = osp.join(opt.data_root, 'obs', opt.split, opt.obid)
obs_jpaths = [osp.join(obs_root, o) for o in os.listdir(obs_root)]
obs_jpaths = sorted([o for o in obs_jpaths if o.endswith('.json')])
# run tracking
accs = []
timer_avgs, timer_calls = [], []
for i, obs_jpath in enumerate(obs_jpaths):
seqname = obs_jpath.split('/')[-1].split('.')[0]
output_dir = osp.join(result_root, 'frame', seqname)
dataloader = videodataset.LoadImagesAndPoseObs(obs_jpath, opt)
seq_res, ta, tc = eval_seq(opt, dataloader, save_dir=output_dir)
seq_json = fuse_result(seq_res, obs_jpath)
with open(osp.join(result_json_root, "{}.json".format(seqname)), 'w') as f:
json.dump(seq_json, f)
timer_avgs.append(ta)
timer_calls.append(tc)
# eval
logger.info('Evaluate seq: {}'.format(seqname))
if opt.save_videos:
output_video_path = osp.join(output_dir, '{}.mp4'.format(seqname))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
os.system(cmd_str)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
cmd_str = ('python ./eval/poseval/evaluate.py --groundTruth={}/posetrack_data/annotations/{} '
'--predictions={}/ --evalPoseTracking'.format(opt.data_root, opt.split, result_json_root))
os.system(cmd_str)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='', required=True, type=str)
opt = parser.parse_args()
with open(opt.config) as f:
        common_args = yaml.load(f, Loader=yaml.FullLoader)  # explicit Loader for PyYAML >= 5.1
for k, v in common_args['common'].items():
setattr(opt, k, v)
for k, v in common_args['posetrack'].items():
setattr(opt, k, v)
opt.out_root = osp.join('results/pose', opt.exp_name)
opt.out_file = osp.join('results/pose', opt.exp_name + '.json')
print(opt, end='\n\n')
main(opt)
|
tests/datatypes/totest-anyURI.py | eLBati/pyxb | 123 | 12693871 | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import unittest
import pyxb.binding.datatypes as xsd
class Test_anyURI (unittest.TestCase):
def testRange (self):
self.fail("Datatype anyURI test not implemented")
if __name__ == '__main__':
unittest.main()
|
localgraphclustering/GraphLocal.py | vishalbelsare/LocalGraphClustering | 106 | 12693873 | import networkx as nx
import csv
from scipy import sparse as sp
from scipy.sparse import csgraph
import scipy.sparse.linalg as splinalg
import numpy as np
import pandas as pd
import warnings
import collections as cole
from .cpp import *
import random
import gzip
import bz2
import lzma
import ctypes  # used below by _copy_to_shared (mp.RawArray)
import multiprocessing as mp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from matplotlib.colors import to_rgb,to_rgba
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import Normalize
from matplotlib.cm import ScalarMappable
from collections import defaultdict
from .GraphDrawing import GraphDrawing
def _load_from_shared(sabuf, dtype, shape):
return np.frombuffer(sabuf, dtype=dtype).reshape(shape)
""" Create shared memory that can be passed to a child process,
wrapped in a numpy array."""
def _copy_to_shared(a):
# determine the numpy type of a.
dtype = a.dtype
shape = a.shape
sabuf = mp.RawArray(ctypes.c_uint8, a.nbytes)
sa = _load_from_shared(sabuf, dtype, shape)
np.copyto(sa, a) # make a copy
return sa, (sabuf, dtype, shape)
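# Editor sketch of the intended round trip (illustrative values):
#   sa, meta = _copy_to_shared(np.arange(10))   # parent process: copy into shared memory
#   view = _load_from_shared(*meta)             # child process: wrap the same buffer, no copy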
class GraphLocal:
"""
This class implements graph loading from an edgelist, gml or graphml and provides methods that operate on the graph.
Attributes
----------
adjacency_matrix : scipy csr matrix
ai : numpy vector
CSC format index pointer array, its data type is determined by "itype" during initialization
aj : numpy vector
CSC format index array, its data type is determined by "vtype" during initialization
_num_vertices : int
Number of vertices
_num_edges : int
Number of edges
_weighted : boolean
Declares if it is a weighted graph or not
d : float64 numpy vector
Degrees vector
dn : float64 numpy vector
Component-wise reciprocal of degrees vector
d_sqrt : float64 numpy vector
Component-wise square root of degrees vector
dn_sqrt : float64 numpy vector
        Component-wise reciprocal of the square root of the degrees vector
vol_G : float64 numpy vector
Volume of graph
components : list of sets
Each set contains the indices of a connected component of the graph
number_of_components : int
Number of connected components of the graph
bicomponents : list of sets
Each set contains the indices of a biconnected component of the graph
number_of_bicomponents : int
Number of connected components of the graph
core_numbers : dictionary
Core number for each vertex
Methods
-------
read_graph(filename, file_type='edgelist', separator='\t')
Reads the graph from a file
compute_statistics()
Computes statistics for the graph
connected_components()
Computes the connected components of the graph
is_disconnected()
Checks if graph is connected
biconnected_components():
Computes the biconnected components of the graph
core_number()
Returns the core number for each vertex
neighbors(vertex)
Returns a list with the neighbors of the given vertex
list_to_gl(source,target)
Create a GraphLocal object from edge list
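
    Examples
    --------
    A minimal sketch (editor addition); the file name below is illustrative and
    not shipped with the package::

        g = GraphLocal("graph.edgelist", separator=" ")
        g.neighbors(0)                    # neighbor list of vertex 0
        g.compute_conductance([0, 1, 2])  # conductance of a small vertex set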
"""
def __init__(self,
filename = None,
file_type='edgelist',
separator='\t',
remove_whitespace=False,header=False, headerrow=None,
vtype=np.uint32,itype=np.uint32):
"""
Initializes the graph from a gml or a edgelist file and initializes the attributes of the class.
Parameters
----------
See read_graph for a description of the parameters.
"""
if filename != None:
self.read_graph(filename, file_type = file_type, separator = separator, remove_whitespace = remove_whitespace,
header = header, headerrow = headerrow, vtype=vtype, itype=itype)
def __eq__(self,other):
if not isinstance(other, GraphLocal):
return NotImplemented
return np.array_equal(self.ai,other.ai) and np.array_equal(self.aj,other.aj) and np.array_equal(self.adjacency_matrix.data,other.adjacency_matrix.data)
def read_graph(self, filename, file_type='edgelist', separator='\t', remove_whitespace=False, header=False, headerrow=None, vtype=np.uint32, itype=np.uint32):
"""
Reads the graph from an edgelist, gml or graphml file and initializes the class attribute adjacency_matrix.
Parameters
----------
filename : string
Name of the file, for example 'JohnsHopkins.edgelist', 'JohnsHopkins.gml', 'JohnsHopkins.graphml'.
file_type : string
Type of file. Currently only 'edgelist', 'gml' and 'graphml' are supported.
Default = 'edgelist'
separator : string
used if file_type = 'edgelist'
Default = '\t'
remove_whitespace : bool
set it to be True when there is more than one kinds of separators in the file
Default = False
header : bool
            This lets the first line of the file contain a set of header
            information that should be ignored.
Default = False
headerrow : int
            Which row to use as column names. This argument takes precedence over
            header=True, which merely implies headerrow = 0.
Default = None
vtype
numpy integer type of CSC format index array
Default = np.uint32
itype
numpy integer type of CSC format index pointer array
Default = np.uint32
"""
if file_type == 'edgelist':
#dtype = {0:'int32', 1:'int32', 2:'float64'}
if header and headerrow is None:
headerrow = 0
if remove_whitespace:
df = pd.read_csv(filename, header=headerrow, delim_whitespace=remove_whitespace)
else:
df = pd.read_csv(filename, sep=separator, header=headerrow, delim_whitespace=remove_whitespace)
cols = [0,1,2]
if header != None:
cols = list(df.columns)
source = df[cols[0]].values
target = df[cols[1]].values
if df.shape[1] == 2:
weights = np.ones(source.shape[0])
elif df.shape[1] == 3:
weights = df[cols[2]].values
else:
raise Exception('GraphLocal.read_graph: df.shape[1] not in (2, 3)')
self._num_vertices = max(source.max() + 1, target.max()+1)
#self.adjacency_matrix = source, target, weights
self.adjacency_matrix = sp.csr_matrix((weights.astype(np.float64), (source, target)), shape=(self._num_vertices, self._num_vertices))
elif file_type == 'gml':
warnings.warn("Loading a gml is not efficient, we suggest using an edgelist format for this API.")
G = nx.read_gml(filename).to_undirected()
self.adjacency_matrix = nx.adjacency_matrix(G).astype(np.float64)
self._num_vertices = nx.number_of_nodes(G)
elif file_type == 'graphml':
warnings.warn("Loading a graphml is not efficient, we suggest using an edgelist format for this API.")
G = nx.read_graphml(filename).to_undirected()
self.adjacency_matrix = nx.adjacency_matrix(G).astype(np.float64)
self._num_vertices = nx.number_of_nodes(G)
else:
print('This file type is not supported')
return
self._weighted = False
for i in self.adjacency_matrix.data:
if i != 1:
self._weighted = True
break
is_symmetric = (self.adjacency_matrix != self.adjacency_matrix.T).sum() == 0
if not is_symmetric:
# Symmetrize matrix, choosing larger weight
sel = self.adjacency_matrix.T > self.adjacency_matrix
self.adjacency_matrix = self.adjacency_matrix - self.adjacency_matrix.multiply(sel) + self.adjacency_matrix.T.multiply(sel)
assert (self.adjacency_matrix != self.adjacency_matrix.T).sum() == 0
self._num_edges = self.adjacency_matrix.nnz
self.compute_statistics()
self.ai = itype(self.adjacency_matrix.indptr)
self.aj = vtype(self.adjacency_matrix.indices)
@classmethod
def from_networkx(cls,G):
"""
Create a GraphLocal object from a networkx graph.
        Parameters
        ----------
G
The networkx graph.
"""
if G.is_directed() == True:
raise Exception("from_networkx requires an undirected graph, use G.to_undirected()")
rval = cls()
rval.adjacency_matrix = nx.adjacency_matrix(G).astype(np.float64)
rval._num_vertices = nx.number_of_nodes(G)
# TODO, use this in the read_graph
rval._weighted = False
for i in rval.adjacency_matrix.data:
if i != 1:
rval._weighted = True
break
# automatically determine sizes
if G.number_of_nodes() < 4294967295:
vtype = np.uint32
else:
vtype = np.int64
if 2*G.number_of_edges() < 4294967295:
itype = np.uint32
else:
itype = np.int64
rval._num_edges = rval.adjacency_matrix.nnz
rval.compute_statistics()
rval.ai = itype(rval.adjacency_matrix.indptr)
rval.aj = vtype(rval.adjacency_matrix.indices)
return rval
@classmethod
def from_sparse_adjacency(cls,A):
"""
Create a GraphLocal object from a sparse adjacency matrix.
        Parameters
        ----------
A
Adjacency matrix.
"""
self = cls()
self.adjacency_matrix = A.copy()
self._num_vertices = A.shape[0]
self._num_edges = A.nnz
# TODO, use this in the read_graph
self._weighted = False
for i in self.adjacency_matrix.data:
if i != 1:
self._weighted = True
break
# automatically determine sizes
if self._num_vertices < 4294967295:
vtype = np.uint32
else:
vtype = np.int64
if 2*self._num_edges < 4294967295:
itype = np.uint32
else:
itype = np.int64
self.compute_statistics()
self.ai = itype(self.adjacency_matrix.indptr)
self.aj = vtype(self.adjacency_matrix.indices)
return self
def renew_data(self,A):
"""
Update data because the adjacency matrix changed
        Parameters
        ----------
A
Adjacency matrix.
"""
self._num_edges = A.nnz
# TODO, use this in the read_graph
self._weighted = False
for i in self.adjacency_matrix.data:
if i != 1:
self._weighted = True
break
# automatically determine sizes
if self._num_vertices < 4294967295:
vtype = np.uint32
else:
vtype = np.int64
if 2*self._num_edges < 4294967295:
itype = np.uint32
else:
itype = np.int64
self.compute_statistics()
self.ai = itype(self.adjacency_matrix.indptr)
self.aj = vtype(self.adjacency_matrix.indices)
def list_to_gl(self,source,target,weights,vtype=np.uint32, itype=np.uint32):
"""
Create a GraphLocal object from edge list.
Parameters
----------
source
A numpy array of sources for the edges
target
A numpy array of targets for the edges
weights
A numpy array of weights for the edges
vtype
numpy integer type of CSC format index array
Default = np.uint32
itype
numpy integer type of CSC format index pointer array
Default = np.uint32
"""
# TODO, fix this up to avoid duplicating code with read...
source = np.array(source,dtype=vtype)
target = np.array(target,dtype=vtype)
weights = np.array(weights,dtype=np.double)
self._num_edges = len(source)
self._num_vertices = max(source.max() + 1, target.max()+1)
self.adjacency_matrix = sp.csr_matrix((weights.astype(np.float64), (source, target)), shape=(self._num_vertices, self._num_vertices))
self._weighted = False
for i in self.adjacency_matrix.data:
if i != 1:
self._weighted = True
break
is_symmetric = (self.adjacency_matrix != self.adjacency_matrix.T).sum() == 0
if not is_symmetric:
# Symmetrize matrix, choosing larger weight
sel = self.adjacency_matrix.T > self.adjacency_matrix
self.adjacency_matrix = self.adjacency_matrix - self.adjacency_matrix.multiply(sel) + self.adjacency_matrix.T.multiply(sel)
assert (self.adjacency_matrix != self.adjacency_matrix.T).sum() == 0
self._num_edges = self.adjacency_matrix.nnz
self.compute_statistics()
self.ai = itype(self.adjacency_matrix.indptr)
self.aj = vtype(self.adjacency_matrix.indices)
def discard_weights(self):
""" Discard any weights that were loaded from the data file.
This sets all the weights associated with each edge to 1.0,
which is our "no weight" case."""
self.adjacency_matrix.data.fill(1.0)
self._weighted = False
self.compute_statistics()
def compute_statistics(self):
"""
Computes statistics for the graph. It updates the class attributes.
The user needs to read the graph first before calling
this method by calling the read_graph method from this class.
"""
self.d = np.ravel(self.adjacency_matrix.sum(axis=1))
self.dn = np.zeros(self._num_vertices)
self.dn[self.d != 0] = 1.0 / self.d[self.d != 0]
self.d_sqrt = np.sqrt(self.d)
self.dn_sqrt = np.sqrt(self.dn)
self.vol_G = np.sum(self.d)
def to_shared(self):
""" Re-create the graph data with multiprocessing compatible
shared-memory arrays that can be passed to child-processes.
This returns a dictionary that allows the graph to be
re-created in a child-process from that variable and
the method "from_shared"
At this moment, this doesn't send any data from components,
core_numbers, or biconnected_components
"""
sgraphvars = {}
self.ai, sgraphvars["ai"] = _copy_to_shared(self.ai)
self.aj, sgraphvars["aj"] = _copy_to_shared(self.aj)
self.d, sgraphvars["d"] = _copy_to_shared(self.d)
self.dn, sgraphvars["dn"] = _copy_to_shared(self.dn)
self.d_sqrt, sgraphvars["d_sqrt"] = _copy_to_shared(self.d_sqrt)
self.dn_sqrt, sgraphvars["dn_sqrt"] = _copy_to_shared(self.dn_sqrt)
self.adjacency_matrix.data, sgraphvars["a"] = _copy_to_shared(self.adjacency_matrix.data)
# this will rebuild without copying
# so that copies should all be accessing exactly the same
# arrays for caching
self.adjacency_matrix = sp.csr_matrix(
(self.adjacency_matrix.data, self.aj, self.ai),
shape=(self._num_vertices, self._num_vertices))
# scalars
sgraphvars["n"] = self._num_vertices
sgraphvars["m"] = self._num_edges
sgraphvars["vol"] = self.vol_G
sgraphvars["weighted"] = self._weighted
return sgraphvars
@classmethod
def from_shared(cls, sgraphvars):
""" Return a graph object from the output of "to_shared". """
g = cls()
g._num_vertices = sgraphvars["n"]
g._num_edges = sgraphvars["m"]
g._weighted = sgraphvars["weighted"]
g.vol_G = sgraphvars["vol"]
g.ai = _load_from_shared(*sgraphvars["ai"])
g.aj = _load_from_shared(*sgraphvars["aj"])
g.adjacency_matrix = sp.csr_matrix(
(_load_from_shared(*sgraphvars["a"]), g.aj, g.ai),
shape=(g._num_vertices, g._num_vertices))
g.d = _load_from_shared(*sgraphvars["d"])
g.dn = _load_from_shared(*sgraphvars["dn"])
g.d_sqrt = _load_from_shared(*sgraphvars["d_sqrt"])
g.dn_sqrt = _load_from_shared(*sgraphvars["dn_sqrt"])
return g
def connected_components(self):
"""
Computes the connected components of the graph. It stores the results in class attributes components
        and number_of_components. The user needs to read the graph (via read_graph)
        before calling this function.
"""
output = csgraph.connected_components(self.adjacency_matrix,directed=False)
self.components = output[1]
self.number_of_components = output[0]
print('There are ', self.number_of_components, ' connected components in the graph')
def is_disconnected(self):
"""
The output can be accessed from the graph object that calls this function.
Checks if the graph is a disconnected graph. It prints the result as a comment and
        returns True if the graph is disconnected, or False otherwise. The user needs to
        read the graph (via read_graph) before calling this function.
This function calls Networkx.
Returns
-------
True
If connected
False
If disconnected
"""
        # self.d only exists after a graph has been read and compute_statistics() has run
        if not hasattr(self, "d"):
print('The graph has to be read first.')
return
self.connected_components()
if self.number_of_components > 1:
print('The graph is a disconnected graph.')
return True
else:
print('The graph is not a disconnected graph.')
return False
def biconnected_components(self):
"""
Computes the biconnected components of the graph. It stores the results in class attributes bicomponents
        and number_of_bicomponents. The user needs to read the graph (via read_graph)
        before calling this function. This function calls Networkx.
"""
warnings.warn("Warning, biconnected_components is not efficiently implemented.")
g_nx = nx.from_scipy_sparse_matrix(self.adjacency_matrix)
self.bicomponents = list(nx.biconnected_components(g_nx))
self.number_of_bicomponents = len(self.bicomponents)
def core_number(self):
"""
Returns the core number for each vertex. A k-core is a maximal
subgraph that contains nodes of degree k or more. The core number of a node
is the largest value k of a k-core containing that node. The user needs to
        read the graph (via read_graph) before calling this function.
        The output can be accessed from the graph object that
calls this function. It stores the results in class attribute core_numbers.
"""
warnings.warn("Warning, core_number is not efficiently implemented.")
g_nx = nx.from_scipy_sparse_matrix(self.adjacency_matrix)
self.core_numbers = nx.core_number(g_nx)
def neighbors(self,vertex):
"""
Returns a list with the neighbors of the given vertex.
"""
# this will be faster since we store the arrays ourselves.
return self.aj[self.ai[vertex]:self.ai[vertex+1]].tolist()
#return self.adjacency_matrix[:,vertex].nonzero()[0].tolist()
def compute_conductance(self,R,cpp=True):
"""
Return conductance of a set of vertices.
"""
records = self.set_scores(R,cpp=cpp)
return records["cond"]
def set_scores(self,R,cpp=True):
"""
Return various metrics of a set of vertices.
"""
voltrue,cut = 0,0
if cpp:
voltrue, cut = set_scores_cpp(self._num_vertices,self.ai,self.aj,self.adjacency_matrix.data,self.d,R,self._weighted)
else:
voltrue = sum(self.d[R])
v_ones_R = np.zeros(self._num_vertices)
v_ones_R[R] = 1
cut = voltrue - np.dot(v_ones_R,self.adjacency_matrix.dot(v_ones_R.T))
voleff = min(voltrue,self.vol_G - voltrue)
sizetrue = len(R)
sizeeff = sizetrue
if voleff < voltrue:
sizeeff = self._num_vertices - sizetrue
# remove the stuff we don't want returned...
del R
del self
if not cpp:
del v_ones_R
del cpp
edgestrue = voltrue - cut
edgeseff = voleff - cut
cond = cut / voleff if voleff != 0 else 1
isop = cut / sizeeff if sizeeff != 0 else 1
# make a dictionary out of local variables
return locals()
def largest_component(self):
self.connected_components()
if self.number_of_components == 1:
#self.compute_statistics()
return self
else:
# find nodes of largest component
counter=cole.Counter(self.components)
maxccnodes = []
what_key = counter.most_common(1)[0][0]
for i in range(self._num_vertices):
if what_key == self.components[i]:
maxccnodes.append(i)
# biggest component by len of it's list of nodes
#maxccnodes = max(self.components, key=len)
#maxccnodes = list(maxccnodes)
warnings.warn("The graph has multiple (%i) components, using the largest with %i / %i nodes"%(
self.number_of_components, len(maxccnodes), self._num_vertices))
g_copy = GraphLocal()
g_copy.adjacency_matrix = self.adjacency_matrix[maxccnodes,:].tocsc()[:,maxccnodes].tocsr()
g_copy._num_vertices = len(maxccnodes) # AHH!
g_copy.compute_statistics()
g_copy._weighted = self._weighted
dt = np.dtype(self.ai[0])
itype = np.int64 if dt.name == 'int64' else np.uint32
dt = np.dtype(self.aj[0])
vtype = np.int64 if dt.name == 'int64' else np.uint32
g_copy.ai = itype(g_copy.adjacency_matrix.indptr)
g_copy.aj = vtype(g_copy.adjacency_matrix.indices)
g_copy._num_edges = g_copy.adjacency_matrix.nnz
return g_copy
def local_extrema(self,vals,strict=False,reverse=False):
"""
Find extrema in a graph based on a set of values.
Parameters
----------
vals: Sequence[float]
            a feature value per node, compared against the values of its neighbors (e.g. conductance)
strict: bool
            If True, find vertices i where vals(i) < vals(j) for all neighbors j in N(i),
            i.e. strict local minima in the space of the graph.
            If False, find vertices i where vals(i) <= vals(j) for all neighbors j in N(i),
            i.e. non-strict local minima in the space of the graph.
reverse: bool
if True, then find local maxima, if False then find local minima
(by default, this is false, so we find local minima)
Returns
-------
minverts: Sequence[int]
the set of vertices
minvals: Sequence[float]
the set of min values
"""
n = self.adjacency_matrix.shape[0]
minverts = []
ai = self.ai
aj = self.aj
factor = 1.0
if reverse:
factor = -1.0
for i in range(n):
vali = factor*vals[i]
lmin = True
for nzi in range(ai[i],ai[i+1]):
v = aj[nzi]
if v == i:
continue # skip self-loops
if strict:
if vali < factor*vals[v]:
continue
else:
lmin = False
else:
if vali <= factor*vals[v]:
continue
else:
lmin = False
if lmin == False:
break # break out of the loop
if lmin:
minverts.append(i)
minvals = vals[minverts]
return minverts, minvals
@staticmethod
    def _plotting(drawing,edgecolor,edgealpha,linewidth,is_3d,angle=30,**kwargs):
"""
private function to do the plotting
"**kwargs" represents all possible optional parameters of "scatter" function
in matplotlib.pyplot
"""
drawing.scatter(**kwargs)
drawing.plot(color=edgecolor,alpha=edgealpha,linewidths=linewidth)
axs = drawing.ax
axs.autoscale()
        if is_3d:
            # Set the initial 3d view (elevation 30, azimuth given by "angle")
            axs.view_init(30, angle)
def draw(self,coords,alpha=1.0,nodesize=5,linewidth=1,
nodealpha=1.0,edgealpha=1.0,edgecolor='k',nodemarker='o',
axs=None,fig=None,values=None,cm=None,valuecenter=None,angle=30,
figsize=None,nodecolor='r'):
"""
Standard drawing function when having single cluster
Parameters
----------
coords: a n-by-2 or n-by-3 array with coordinates for each node of the graph.
Optional parameters
------------------
alpha: float (1.0 by default)
the overall alpha scaling of the plot, [0,1]
nodealpha: float (1.0 by default)
the overall node alpha scaling of the plot, [0, 1]
edgealpha: float (1.0 by default)
the overall edge alpha scaling of the plot, [0, 1]
nodecolor: string or RGB ('r' by default)
edgecolor: string or RGB ('k' by default)
setcolor: string or RGB ('y' by default)
nodemarker: string ('o' by default)
nodesize: float (5.0 by default)
linewidth: float (1.0 by default)
axs,fig: None,None (default)
by default it will create a new figure, or this will plot in axs if not None.
values: Sequence[float] (None by default)
used to determine node colors in a colormap, should have the same length as coords
valuecenter: often used with values together to determine vmin and vmax of colormap
offset = max(abs(values-valuecenter))
vmax = valuecenter + offset
vmin = valuecenter - offset
cm: string or colormap object (None by default)
figsize: tuple (None by default)
angle: float (30 by default)
set initial view angle when drawing 3d
Returns
-------
A GraphDrawing object
"""
drawing = GraphDrawing(self,coords,ax=axs,figsize=figsize)
if values is not None:
values = np.asarray(values)
if values.ndim == 2:
node_color_list = np.reshape(values,len(coords))
else:
node_color_list = values
vmin = min(node_color_list)
vmax = max(node_color_list)
if cm is not None:
cm = plt.get_cmap(cm)
else:
if valuecenter is not None:
#when both values and valuecenter are provided, use PuOr colormap to determine colors
cm = plt.get_cmap("PuOr")
offset = max(abs(node_color_list-valuecenter))
vmax = valuecenter + offset
vmin = valuecenter - offset
else:
cm = plt.get_cmap("magma")
self._plotting(drawing,edgecolor,edgealpha,linewidth,len(coords[0])==3,c=node_color_list,alpha=alpha*nodealpha,
edgecolors='none',s=nodesize,marker=nodemarker,zorder=2,cmap=cm,vmin=vmin,vmax=vmax)
else:
self._plotting(drawing,edgecolor,edgealpha,linewidth,len(coords[0])==3,c=nodecolor,alpha=alpha*nodealpha,
edgecolors='none',s=nodesize,marker=nodemarker,zorder=2)
return drawing
def draw_groups(self,coords,groups,alpha=1.0,nodesize_list=[],linewidth=1,
nodealpha=1.0,edgealpha=0.01,edgecolor='k',nodemarker_list=[],node_color_list=[],nodeorder_list=[],axs=None,
fig=None,cm=None,angle=30,figsize=None):
"""
Standard drawing function when having multiple clusters
Parameters
----------
coords: a n-by-2 or n-by-3 array with coordinates for each node of the graph.
groups: list[list] or list, for the first case, each sublist represents a cluster
for the second case, list must have the same length as the number of nodes and
nodes with the number are in the same cluster
Optional parameters
------------------
alpha: float (1.0 by default)
the overall alpha scaling of the plot, [0,1]
nodealpha: float (1.0 by default)
the overall node alpha scaling of the plot, [0, 1]
edgealpha: float (1.0 by default)
the overall edge alpha scaling of the plot, [0, 1]
nodecolor_list: list of string or RGB ('r' by default)
edgecolor: string or RGB ('k' by default)
nodemarker_list: list of strings ('o' by default)
nodesize_list: list of floats (5.0 by default)
linewidth: float (1.0 by default)
axs,fig: None,None (default)
by default it will create a new figure, or this will plot in axs if not None.
cm: string or colormap object (None by default)
figsize: tuple (None by default)
angle: float (30 by default)
set initial view angle when drawing 3d
Returns
-------
A GraphDrawing object
"""
#when values are not provided, use tab20 or gist_ncar colormap to determine colors
number_of_colors = 1
l_initial_node_color_list = len(node_color_list)
l_initial_nodesize_list = len(nodesize_list)
l_initial_nodemarker_list = len(nodemarker_list)
l_initial_nodeorder_list = len(nodeorder_list)
if l_initial_node_color_list == 0:
node_color_list = np.zeros(self._num_vertices)
if l_initial_nodesize_list == 0:
nodesize_list = 25*np.ones(self._num_vertices)
if l_initial_nodemarker_list == 0:
nodemarker_list = 'o'
if l_initial_nodeorder_list == 0:
nodeorder_list = 2
groups = np.asarray(groups)
if groups.ndim == 1:
#convert 1-d group to a 2-d representation
grp_dict = defaultdict(list)
for idx,key in enumerate(groups):
grp_dict[key].append(idx)
groups = np.asarray(list(grp_dict.values()))
number_of_colors += len(groups)
#separate the color for different groups as far as we can
if l_initial_node_color_list == 0:
for i,g in enumerate(groups):
node_color_list[g] = (1+i)*1.0/(number_of_colors-1)
if number_of_colors <= 20:
cm = plt.get_cmap("tab20b")
else:
cm = plt.get_cmap("gist_ncar")
vmin = 0.0
vmax = 1.0
drawing = GraphDrawing(self,coords,ax=axs,figsize=figsize)
#m = ScalarMappable(norm=Normalize(vmin=vmin,vmax=vmax), cmap=cm)
#rgba_list = m.to_rgba(node_color_list,alpha=alpha*nodealpha)
self._plotting(drawing,edgecolor,edgealpha,linewidth,len(coords[0])==3,s=nodesize_list,marker=nodemarker_list,zorder=nodeorder_list,cmap=cm,vmin=vmin,vmax=vmax,alpha=alpha*nodealpha,edgecolors='none',c=node_color_list)
return drawing
"""
def draw_2d(self,pos,axs,cm,nodemarker='o',nodesize=5,edgealpha=0.01,linewidth=1,
node_color_list=None,edgecolor='k',nodecolor='r',node_list=None,nodelist_in=None,
nodelist_out=None,setalpha=1.0,nodealpha=1.0,use_values=False,vmin=0.0,vmax=1.0):
if use_values:
axs.scatter([p[0] for p in pos[nodelist_in]],[p[1] for p in pos[nodelist_in]],c=node_color_list[nodelist_in],
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),alpha=setalpha,zorder=2)
axs.scatter([p[0] for p in pos[nodelist_out]],[p[1] for p in pos[nodelist_out]],c=node_color_list[nodelist_out],
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),alpha=nodealpha,zorder=2)
else:
if node_color_list is not None:
axs.scatter([p[0] for p in pos],[p[1] for p in pos],c=node_color_list,s=nodesize,marker=nodemarker,
cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),zorder=2)
else:
axs.scatter([p[0] for p in pos],[p[1] for p in pos],c=nodecolor,s=nodesize,marker=nodemarker,zorder=2)
node_list = range(self._num_vertices) if node_list is None else node_list
edge_pos = []
for i in node_list:
self._push_edges_for_node(i,self.aj[self.ai[i]:self.ai[i+1]],pos,edge_pos)
edge_pos = np.asarray(edge_pos)
edge_collection = LineCollection(edge_pos,colors=to_rgba(edgecolor,edgealpha),linewidths=linewidth)
#make sure edges are at the bottom
edge_collection.set_zorder(1)
axs.add_collection(edge_collection)
axs.autoscale()
def draw_3d(self,pos,axs,cm,nodemarker='o',nodesize=5,edgealpha=0.01,linewidth=1,
node_color_list=None,angle=30,edgecolor='k',nodecolor='r',node_list=None,
nodelist_in=None,nodelist_out=None,setalpha=1.0,nodealpha=1.0,use_values=False,vmin=0.0,vmax=1.0):
if use_values:
axs.scatter([p[0] for p in pos[nodelist_in]],[p[1] for p in pos[nodelist_in]],[p[2] for p in pos[nodelist_in]],c=node_color_list[nodelist_in],
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),zorder=2,alpha=setalpha)
axs.scatter([p[0] for p in pos[nodelist_out]],[p[1] for p in pos[nodelist_out]],[p[2] for p in pos[nodelist_out]],c=node_color_list[nodelist_out],
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),zorder=2,alpha=nodealpha)
else:
if node_color_list is not None:
axs.scatter([p[0] for p in pos],[p[1] for p in pos],[p[2] for p in pos],c=node_color_list,
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),zorder=2)
else:
axs.scatter([p[0] for p in pos],[p[1] for p in pos],[p[2] for p in pos],c=nodecolor,
s=nodesize,marker=nodemarker,zorder=2)
node_list = range(self._num_vertices) if node_list is None else node_list
edge_pos = []
for i in node_list:
self._push_edges_for_node(i,self.aj[self.ai[i]:self.ai[i+1]],pos,edge_pos)
edge_pos = np.asarray(edge_pos)
edge_collection = Line3DCollection(edge_pos,colors=to_rgba(edgecolor,edgealpha),linewidths=linewidth)
#make sure edges are at the bottom
edge_collection.set_zorder(1)
axs.add_collection(edge_collection)
axs.autoscale()
# Set the initial view
axs.view_init(30, angle)
"""
|
skidl/netlist_to_skidl_main.py | vkleen/skidl | 700 | 12693878 |
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
"""
Command-line program to convert a netlist into an equivalent SKiDL program.
"""
from __future__ import ( # isort:skip
absolute_import,
division,
print_function,
unicode_literals,
)
import argparse
import logging
import os
import shutil
import sys
from builtins import open
from future import standard_library
from .netlist_to_skidl import netlist_to_skidl
from .pckg_info import __version__
standard_library.install_aliases()
###############################################################################
# Command-line interface.
###############################################################################
def main():
parser = argparse.ArgumentParser(
description="A Python package for textually describing circuit schematics."
)
parser.add_argument(
"--version", "-v", action="version", version="skidl " + __version__
)
parser.add_argument(
"--input",
"-i",
nargs=1,
type=str,
metavar="file.net",
help="Netlist input file.",
)
parser.add_argument(
"--output",
"-o",
nargs=1,
type=str,
metavar="file.py",
help="Output file for SKiDL code.",
)
parser.add_argument(
"--overwrite", "-w", action="store_true", help="Overwrite an existing file."
)
parser.add_argument(
"--nobackup",
"-nb",
action="store_true",
help="Do *not* create backups before modifying files. "
+ "(Default is to make backup files.)",
)
parser.add_argument(
"--debug",
"-d",
nargs="?",
type=int,
default=0,
metavar="LEVEL",
help="Print debugging info. (Larger LEVEL means more info.)",
)
args = parser.parse_args()
logger = logging.getLogger("netlist_to_skidl")
if args.debug is not None:
log_level = logging.DEBUG + 1 - args.debug
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(log_level)
logger.addHandler(handler)
logger.setLevel(log_level)
if args.input is None:
logger.critical("Hey! Give me some netlist files!")
sys.exit(2)
if args.output is None:
print("Hey! I need some place where I can store the SKiDL code!")
sys.exit(1)
for file in args.output:
if os.path.isfile(file):
if not args.overwrite and args.nobackup:
                logger.critical(
                    "File {} already exists! Use the --overwrite option to "
                    "allow modifications to it or allow backups.".format(file)
                )
sys.exit(1)
if not args.nobackup:
# Create a backup file.
index = 1 # Start with this backup file suffix.
while True:
                    backup_file = file + ".{}.bak".format(index)
if not os.path.isfile(backup_file):
# Found an unused backup file name, so make backup.
shutil.copy(file, backup_file)
break # Backup done, so break out of loop.
index += 1 # Else keep looking for an unused backup file name.
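                    # e.g. an existing "out.py" gains backups "out.py.1.bak", then
                    # "out.py.2.bak", and so on (file name illustrative only).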
skidl_code = netlist_to_skidl(args.input[0])
open(args.output[0], "w").write(skidl_code)
###############################################################################
# Main entrypoint.
###############################################################################
if __name__ == "__main__":
main()
|
njunmt/decoders/transformer_decoder.py | whr94621/NJUNMT-tf | 111 | 12693883 |
# Copyright 2017 Natural Language Processing Group, Nanjing University, <EMAIL>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Implement transformer decoder as described in https://arxiv.org/abs/1706.03762. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.util import nest
from collections import namedtuple
from njunmt.utils.constants import ModeKeys
from njunmt.decoders.decoder import dynamic_decode
from njunmt.decoders.decoder import initialize_cache
from njunmt.decoders.decoder import Decoder
from njunmt.layers.common_layers import dropout_wrapper
from njunmt.layers.common_layers import layer_preprocess
from njunmt.layers.common_layers import layer_postprocessing
from njunmt.layers.common_layers import transformer_ffn_layer
from njunmt.layers.common_attention import MultiHeadAttention
from njunmt.layers.common_attention import attention_bias_lower_triangle
class TransformerDecoder(Decoder):
""" Implement transformer decoder as described
in https://arxiv.org/abs/1706.03762. """
def __init__(self,
params,
mode,
name=None,
verbose=True):
""" Initializes decoder parameters.
Args:
params: A dictionary of parameters to construct the
decoder architecture.
mode: A mode.
name: The name of this decoder.
verbose: Print decoder parameters if set True.
"""
super(TransformerDecoder, self).__init__(params, mode, name, verbose)
self._self_attention_layers = []
self._encdec_attention_layers = []
for layer in range(self.params["num_layers"]):
self._self_attention_layers.append(
MultiHeadAttention(self.params["selfattention.params"], self.mode))
self._encdec_attention_layers.append(
MultiHeadAttention(self.params["attention.params"], self.mode))
if self.mode == ModeKeys.TRAIN:
self._DecoderOutputSpec = namedtuple(
"TransformerOutput", "decoder_hidden")
elif self.mode == ModeKeys.EVAL:
self._DecoderOutputSpec = namedtuple(
"TransformerOutput", "decoder_hidden decoder_self_attention encoder_decoder_attention")
else:
self._DecoderOutputSpec = namedtuple(
"TransformerOutput", "decoder_hidden encoder_decoder_attention")
@staticmethod
def default_params():
""" Returns a dictionary of default parameters of TransformerDecoder. """
return {
"num_layers": 6,
"attention.params": {}, # Arbitrary parameters for the enc-dec attention layer
"selfattention.params": {}, # Arbitrary parameters for the self-attention layer
"num_filter_units": 2048,
"num_hidden_units": 512,
"dropout_relu_keep_prob": 0.9,
"layer_preprocess_sequence": "n",
"layer_postprocess_sequence": "da",
"layer_prepostprocess_dropout_keep_prob": 0.9
}
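    # Editor note (not in the original source): these defaults correspond to the "base"
    # Transformer of Vaswani et al. (2017), i.e. 6 layers, 512 hidden units and 2048 filter
    # units, and are normally overridden through the model configuration rather than edited here.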
@property
def output_dtype(self):
""" Returns a `collections.namedtuple`,
the definition of decoder output types. """
if self.mode == ModeKeys.TRAIN:
return self._DecoderOutputSpec(
decoder_hidden=tf.float32)
elif self.mode == ModeKeys.EVAL:
return self._DecoderOutputSpec(
decoder_hidden=tf.float32,
decoder_self_attention=[tf.float32] * self.params["num_layers"],
encoder_decoder_attention=[tf.float32] * self.params["num_layers"])
else:
return self._DecoderOutputSpec(
decoder_hidden=tf.float32,
encoder_decoder_attention=[tf.float32] * self.params["num_layers"])
def merge_top_features(self, decoder_output):
""" Merges features of decoder top layers, as the input
of softmax layer.
Here is the same as the hidden state of the last layer
of the transformer decoder.
Args:
decoder_output: An instance of `collections.namedtuple`
whose element types are defined by `output_dtype`
property.
Returns: A instance of `tf.Tensor`, as the input of
softmax layer.
"""
return decoder_output.decoder_hidden
def decode(self, encoder_output, bridge, helper,
target_to_embedding_fn,
outputs_to_logits_fn,
**kwargs):
""" Decodes one sample.
Args:
encoder_output: An instance of `collections.namedtuple`
from `Encoder.encode()`.
bridge: None.
helper: An instance of `Feedback` that samples next
symbols from logits.
target_to_embedding_fn: A callable, converts target ids to
embeddings.
outputs_to_logits_fn: A callable, converts decoder outputs
to logits.
Returns: A tuple `(decoder_output, decoder_status)`. The
`decoder_output` is an instance of `collections.namedtuple`
whose element types are defined by `output_dtype` property.
For mode=INFER, the `decoder_status` is a dict containing
hypothesis, log probabilities, beam ids and decoding length.
For mode=TRAIN/EVAL, the `decoder_status` is a `tf.Tensor`
indicating logits (computed by `target_modality`), of shape
[timesteps, batch_size, vocab_size].
"""
if bridge is not None and self.verbose:
tf.logging.info(
"TransformerDecoder ignores bridge: {}".format(bridge.name))
if self.mode == ModeKeys.TRAIN or self.mode == ModeKeys.EVAL:
assert hasattr(helper, "label_ids"), (
"helper ({}) for TransformerDecoder when mode=TRAIN/EVAL "
"should provide attr \"label_ids\"".format(type(helper)))
# prepare decoder input
label_ids = getattr(helper, "label_ids") # [batch_size, max_len_trg]
batch_size = tf.shape(label_ids)[0]
# shift
target_sos_ids = tf.tile([helper.vocab.sos_id], [batch_size])
target_sos_ids = tf.reshape(target_sos_ids, [batch_size, 1])
label_ids = tf.concat([target_sos_ids, label_ids], axis=1)[:, :-1]
decoder_inputs = target_to_embedding_fn(label_ids)
with tf.variable_scope(self.name):
cache = self.prepare(encoder_output, None, helper)
outputs, decoder_self_attention, encdec_attention \
= self._transform(decoder_inputs, cache) # [batch_size, time, dim]
if self.mode == ModeKeys.TRAIN:
final_outputs = self._DecoderOutputSpec(
decoder_hidden=outputs)
else:
final_outputs = self._DecoderOutputSpec(
decoder_hidden=outputs,
# transpose to [length_q, batch_size, num_heads length_k]
decoder_self_attention=nest.map_structure(
lambda x: tf.transpose(x, [2, 0, 1, 3]), decoder_self_attention),
encoder_decoder_attention=nest.map_structure(
lambda x: tf.transpose(x, [2, 0, 1, 3]), encdec_attention))
decoder_top_features = self.merge_top_features(final_outputs)
# do transpose to fit loss function, [time, batch_size, dim]
decoder_top_features = tf.transpose(decoder_top_features, [1, 0, 2])
logits = outputs_to_logits_fn(decoder_top_features) # [time, batch_size, vocab_size]
return final_outputs, logits
outputs, infer_status = dynamic_decode(
decoder=self, encoder_output=encoder_output,
bridge=None, helper=helper,
target_to_embedding_fn=target_to_embedding_fn,
outputs_to_logits_fn=outputs_to_logits_fn,
**kwargs)
return outputs, infer_status
def prepare(self, encoder_output, bridge, helper):
""" Prepares for `step()` function.
Do
1. acquire attention information from `encoder_output`;
Args:
encoder_output: An instance of `collections.namedtuple`
from `Encoder.encode()`.
bridge: None.
helper: An instance of `Feedback` that samples next
symbols from logits.
Returns: A dict containing decoder RNN states, pre-projected attention
keys, attention values and attention length, and will be passed
to `step()` function.
"""
_ = bridge
attention_values = encoder_output.attention_values
attention_length = encoder_output.attention_length
if hasattr(encoder_output, "attention_bias"):
attention_bias = encoder_output.attention_bias
else:
attention_bias = MultiHeadAttention.attention_length_to_bias(
tf.shape(attention_values)[1], attention_length)
# initialize cache
if self.mode == ModeKeys.INFER:
decoding_states = {}
batch_size = tf.shape(attention_values)[0]
depth = self._self_attention_layers[0].attention_value_depth
if depth < 0:
# TODO please check when code goes into this condition
depth = tf.shape(attention_values)[2]
# initialize decoder self attention keys/values
for l in range(self.params["num_layers"]):
keys = tf.zeros([batch_size, 0, depth])
values = tf.zeros([batch_size, 0, depth])
# Ensure shape invariance for tf.while_loop.
keys.set_shape([None, None, depth])
values.set_shape([None, None, depth])
with tf.variable_scope("layer_%d" % l):
with tf.variable_scope("encdec_attention"):
with tf.variable_scope(self._encdec_attention_layers[l].name):
preproj_keys, preproj_values = self._encdec_attention_layers[l] \
.compute_kv(attention_values)
decoding_states["layer_{}".format(l)] = {
"self_attention": {"keys": keys, "values": values},
"encdec_attention": {"attention_keys": preproj_keys,
"attention_values": preproj_values}}
else:
decoding_states = None
init_cache = initialize_cache(
decoding_states=decoding_states,
memory=attention_values,
memory_bias=attention_bias)
return init_cache
def step(self, decoder_input, cache):
""" Decodes one step.
Args:
decoder_input: The decoder input for this timestep.
A Tensor, with shape [batch_size, dmodel].
cache: A dict containing decoding states at previous
timestep, attention values and attention length.
Returns: A tuple `(cur_decoder_outputs, cur_cache)` at this timestep.
The `cur_decoder_outputs` must be an instance of `collections.namedtuple`
whose element types are defined by `output_dtype` property. The
`cur_cache` must have the same structure with `cache`.
"""
# decoder self attention: [batch_size, num_heads, length_q, length_k]
outputs, decoder_self_attention, encdec_attention = \
self._transform(tf.expand_dims(decoder_input, axis=1), cache)
final_outputs = self._DecoderOutputSpec(
decoder_hidden=outputs[:, -1, :],
# decoder_self_attention=[tf.squeeze(att, axis=2) for att in decoder_self_attention],
encoder_decoder_attention=[tf.squeeze(att, axis=2) for att in encdec_attention])
# loop on decoder_state, actually it is not used
return final_outputs, cache
def _transform(self, decoder_inputs, cache, pad_remover=None):
""" Decodes one step
Args:
decoder_inputs: The decoder input for this timestep,
A Tensor, with shape [batch_size, timesteps, dmodel].
Note that when mode==INFER, timesteps=1.
cache: A dict containing decoding states at previous
timestep, attention values and attention length.
pad_remover: An expert_utils.PadRemover object tracking the padding
positions. If provided, the padding is removed before applying
the convolution, and restored afterward. This can give a significant
speedup (says Google's tensor2tensor code).
Returns: A transformed Tensor.
"""
# [batch_size, max_len_src, dim]
encdec_attention_values = cache["memory"]
# [batch_size, 1, 1, max_len_src]
encdec_attention_bias = cache["memory_bias"]
decoder_self_attention_scores = []
encdec_attention_scores = []
# decoder_self_attention_bias: [1, 1, max_len_trg, max_len_trg]
decoder_self_attention_bias = attention_bias_lower_triangle(
tf.shape(decoder_inputs)[1])
x = dropout_wrapper(decoder_inputs, self.params["layer_prepostprocess_dropout_keep_prob"])
for layer in range(self.params["num_layers"]):
layer_name = "layer_{}".format(layer)
layer_cache = None if cache["decoding_states"] is None \
else cache["decoding_states"][layer_name]
selfatt_cache = None if layer_cache is None \
else layer_cache["self_attention"]
encdecatt_cache = None if layer_cache is None \
else layer_cache["encdec_attention"]
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
# self attention layer
w_y, y = self._self_attention_layers[layer].build(
query=None,
memory=layer_preprocess(
x=x, process_sequence=self.params["layer_preprocess_sequence"],
dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"]),
memory_bias=decoder_self_attention_bias,
cache=selfatt_cache)
# [batch_size, num_heads, length_q, length_k]
decoder_self_attention_scores.append(w_y)
# apply dropout, layer norm, residual
x = layer_postprocessing(
x=y, previous_x=x,
process_sequence=self.params["layer_postprocess_sequence"],
dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"])
with tf.variable_scope("encdec_attention"):
# encoder-decoder attention
w_y, y = self._encdec_attention_layers[layer].build(
query=layer_preprocess(
x=x, process_sequence=self.params["layer_preprocess_sequence"],
dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"]),
memory=encdec_attention_values,
memory_bias=encdec_attention_bias,
cache=encdecatt_cache)
# [batch_size, num_heads, length_q, length_k]
encdec_attention_scores.append(w_y)
# apply dropout, layer norm, residual
x = layer_postprocessing(
x=y, previous_x=x,
process_sequence=self.params["layer_postprocess_sequence"],
dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"])
with tf.variable_scope("ffn"):
y = transformer_ffn_layer(
x=layer_preprocess(
x=x, process_sequence=self.params["layer_preprocess_sequence"],
dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"]),
filter_size=self.params["num_filter_units"],
output_size=self.params["num_hidden_units"],
pad_remover=pad_remover,
dropout_relu_keep_prob=self.params["dropout_relu_keep_prob"])
# apply dropout, layer norm, residual
x = layer_postprocessing(
x=y, previous_x=x,
process_sequence=self.params["layer_postprocess_sequence"],
dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"])
x = layer_preprocess(
x=x, process_sequence=self.params["layer_preprocess_sequence"],
dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"])
return x, decoder_self_attention_scores, encdec_attention_scores
|
migrations/versions/55c778dd35ab_add_default_notebook_to_users.py | snowdensb/braindump | 631 | 12693886 | <gh_stars>100-1000
"""add default notebook to users
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2016-12-17 21:24:03.788486
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('default_notebook', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'default_notebook')
# ### end Alembic commands ###
|
src/genie/libs/parser/iosxe/tests/ShowPolicyMapType/empty/golden_output10_expected.py | balmasea/genieparser | 204 | 12693913 | expected_output = {
"GigabitEthernet0/1/1": {
"service_policy": {
"output": {
"policy_name": {
"shape-out": {
"class_map": {
"class-default": {
"bytes": 0,
"bytes_output": 0,
"match": ["any"],
"match_evaluation": "match-any",
"no_buffer_drops": 0,
"packets": 0,
"pkts_output": 0,
"queue_depth": 0,
"queue_limit_packets": "64",
"queueing": True,
"rate": {
"drop_rate_bps": 0,
"interval": 300,
"offered_rate_bps": 0,
},
"shape_bc_bps": 2000,
"shape_be_bps": 2000,
"shape_cir_bps": 500000,
"shape_type": "average",
"target_shape_rate": 500000,
"total_drops": 0,
}
}
}
}
}
}
}
}
|
starfish/core/spots/DetectPixels/_base.py | haoxusci/starfish | 164 | 12693916 | <gh_stars>100-1000
from abc import abstractmethod
from typing import Callable, Sequence, Tuple
import numpy as np
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.intensity_table.decoded_intensity_table import DecodedIntensityTable
from starfish.core.pipeline.algorithmbase import AlgorithmBase
from starfish.core.types import Number
from .combine_adjacent_features import ConnectedComponentDecodingResult
class DetectPixelsAlgorithm(metaclass=AlgorithmBase):
@abstractmethod
def run(
self,
primary_image: ImageStack,
*args,
) -> Tuple[DecodedIntensityTable, ConnectedComponentDecodingResult]:
"""Finds spots in an ImageStack"""
raise NotImplementedError()
@staticmethod
def _get_measurement_function(measurement_type: str) -> Callable[[Sequence], Number]:
try:
measurement_function = getattr(np, measurement_type)
except AttributeError:
raise ValueError(
f'measurement_type must be a numpy reduce function such as "max" or "mean". '
f'{measurement_type} not found.')
return measurement_function
|
anima/exc.py | MehmetErer/anima | 101 | 12693928 | # -*- coding: utf-8 -*-
"""This module contains exceptions
"""
class PublishError(RuntimeError):
"""Raised when the published version is not matching the quality
"""
pass
|
src/losses/center_loss.py | Talgin/facerec | 269 | 12693938 | <filename>src/losses/center_loss.py
import os
# MXNET_CPU_WORKER_NTHREADS must be greater than 1 for custom op to work on CPU
#os.environ['MXNET_CPU_WORKER_NTHREADS'] = '2'
import mxnet as mx
# define metric of accuracy
class Accuracy(mx.metric.EvalMetric):
def __init__(self, num=None):
super(Accuracy, self).__init__('accuracy', num)
def update(self, labels, preds):
mx.metric.check_label_shapes(labels, preds)
if self.num is not None:
assert len(labels) == self.num
pred_label = mx.nd.argmax_channel(preds[0]).asnumpy().astype('int32')
label = labels[0].asnumpy().astype('int32')
mx.metric.check_label_shapes(label, pred_label)
self.sum_metric += (pred_label.flat == label.flat).sum()
self.num_inst += len(pred_label.flat)
# define some metric of center_loss
class CenterLossMetric(mx.metric.EvalMetric):
def __init__(self):
super(CenterLossMetric, self).__init__('center_loss')
def update(self, labels, preds):
self.sum_metric += preds[1].asnumpy()[0]
self.num_inst += 1
# see details:
# <A Discriminative Feature Learning Approach for Deep Face Recognition>
class CenterLoss(mx.operator.CustomOp):
def __init__(self, ctx, shapes, dtypes, num_class, alpha, scale=1.0):
if not len(shapes[0]) == 2:
            raise ValueError('dim for input_data should be 2 for CenterLoss')
self.alpha = alpha
self.batch_size = shapes[0][0]
self.num_class = num_class
self.scale = scale
def forward(self, is_train, req, in_data, out_data, aux):
labels = in_data[1].asnumpy()
diff = aux[0]
center = aux[1]
# store x_i - c_yi
for i in range(self.batch_size):
diff[i] = in_data[0][i] - center[int(labels[i])]
loss = mx.nd.sum(mx.nd.square(diff)) / self.batch_size / 2
self.assign(out_data[0], req[0], loss)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
diff = aux[0]
center = aux[1]
sum_ = aux[2]
# back grad is just scale * ( x_i - c_yi)
grad_scale = float(self.scale/self.batch_size)
self.assign(in_grad[0], req[0], diff * grad_scale)
# update the center
labels = in_data[1].asnumpy()
label_occur = dict()
for i, label in enumerate(labels):
label_occur.setdefault(int(label), []).append(i)
for label, sample_index in label_occur.items():
sum_[:] = 0
for i in sample_index:
sum_ = sum_ + diff[i]
delta_c = sum_ / (1 + len(sample_index))
center[label] += self.alpha * delta_c
@mx.operator.register("centerloss")
class CenterLossProp(mx.operator.CustomOpProp):
def __init__(self, num_class, alpha, scale=1.0, batchsize=64):
super(CenterLossProp, self).__init__(need_top_grad=False)
# convert it to numbers
self.num_class = int(num_class)
self.alpha = float(alpha)
self.scale = float(scale)
self.batchsize = int(batchsize)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
# call them 'bias' for zero initialization
return ['diff_bias', 'center_bias', 'sum_bias']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = (in_shape[0][0],)
# store diff , same shape as input batch
diff_shape = [self.batchsize, data_shape[1]]
# store the center of each class , should be ( num_class, d )
center_shape = [self.num_class, diff_shape[1]]
# computation buf
sum_shape = [diff_shape[1],]
output_shape = [1, ]
return [data_shape, label_shape], [output_shape], [diff_shape, center_shape, sum_shape]
def create_operator(self, ctx, shapes, dtypes):
return CenterLoss(ctx, shapes, dtypes, self.num_class, self.alpha, self.scale)
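# --- Illustrative usage sketch (not part of the original module) ---
# Rough example of wiring the registered "centerloss" custom op into a symbol
# graph; the embedding width, class count and hyper-parameter values below are
# assumptions for demonstration only.
#
#   data = mx.sym.Variable('data')
#   label = mx.sym.Variable('softmax_label')
#   embedding = mx.sym.FullyConnected(data=data, num_hidden=2, name='embedding')
#   center_loss = mx.sym.Custom(data=embedding, label=label, name='center_loss',
#                               op_type='centerloss', num_class=10, alpha=0.5,
#                               scale=0.01, batchsize=64)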
|
misc/rando.py | jamesray1/casper | 752 | 12693986 | <reponame>jamesray1/casper
from ethereum.tools import tester
from ethereum.utils import sha3, normalize_address
c = tester.Chain()
x = c.contract(open('rando.v.py').read(), language='vyper')
for i in range(10):
x.deposit(sender=tester.keys[i], value=(i+1)*10**15)
c.mine(1)
o = [0] * 10
for i in range(550):
addr = normalize_address(x.random_select(sha3(str(i))))
o[tester.accounts.index(addr)] += 1
for i, v in enumerate(o):
ev = 10*(i+1)
if not ev - 4*ev**0.5 < v < ev + 4*ev**0.5:
raise Exception("More than four standard deviations away; something is wrong: %.2f %d %.2f" %
(ev - 4*ev**0.5, v, ev + 4*ev**0.5))
print(o)
|
15Flask/day04/demo/demo6_unit_test.py | HaoZhang95/PythonAndMachineLearning | 937 | 12694000 | <filename>15Flask/day04/demo/demo6_unit_test.py
import unittest
from demo.demo5 import app, db, Author, Book  # db and the models are assumed to be defined in demo.demo5 alongside the app
class DatabaseTestCase(unittest.TestCase):
"""
测试数据库的添加i和删除,需要设置一个单独的数据库,不能使用真实的数据库
"""
def setUp(self):
"""单元测试开始之前执行的操作"""
app.config['TESTING'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:mysql@localhost/test0'
self.app = app
db.create_all()
def tearDown(self):
"""单元测试结束后执行的操作"""
# db.session类似于数据库的连接
db.session.remove()
db.drop_all()
def test_append_data(self):
""""""
au = Author(name='itcast')
bk = Book(info='python')
db.session.add_all([au, bk])
db.session.commit()
author = Author.query.filter_by(name='itcast').first()
book = Book.query.filter_by(info='python').first()
        # assert that the data exists
self.assertIsNotNone(author)
self.assertIsNotNone(book) |
pypowerbi/imports.py | MoorsTech/pypowerbi | 101 | 12694012 | <reponame>MoorsTech/pypowerbi
# -*- coding: future_fstrings -*-
import requests
import json
import urllib
import re
from requests.exceptions import HTTPError
from .import_class import Import
class Imports:
# url snippets
groups_snippet = 'groups'
imports_snippet = 'imports'
dataset_displayname_snippet = 'datasetDisplayName'
nameconflict_snippet = 'nameConflict'
def __init__(self, client):
self.client = client
self.base_url = f'{self.client.api_url}/{self.client.api_version_snippet}/{self.client.api_myorg_snippet}'
self.upload_file_replace_regex = re.compile('(?![A-z]|[0-9]).')
@classmethod
def import_from_response(cls, response):
response_dict = json.loads(response.text)
return Import.from_dict(response_dict)
@classmethod
def imports_from_response(cls, response):
response_list = json.loads(response.text).get(Import.value_key)
return [Import.from_dict(x) for x in response_list]
def upload_file(self, filename, dataset_displayname, nameconflict=None, group_id=None):
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# substitute using the regex pattern
prepared_displayname = re.sub(self.upload_file_replace_regex, '-', dataset_displayname)
# append the pbix extension (strange yes, but names correctly in powerbi service if so)
prepared_displayname = f'{prepared_displayname}.pbix'
url = f'{self.base_url}{groups_part}{self.imports_snippet}' \
f'?{urllib.parse.urlencode({self.dataset_displayname_snippet : prepared_displayname})}'
if nameconflict is not None:
url = url + f'&{self.nameconflict_snippet}={nameconflict}'
headers = self.client.auth_header
try:
with open(filename, 'rb') as file_obj:
response = requests.post(url, headers=headers,
files={
'file': file_obj,
})
except TypeError:
# assume filename is a file-like object already
response = requests.post(url, headers=headers,
files={
'file': filename,
})
# 200 OK
if response.status_code == 200:
import_object = self.import_from_response(response)
# 202 Accepted
elif response.status_code == 202:
import_object = self.import_from_response(response)
        # 409 Conflict (due to name)
elif response.status_code == 409:
raise NotImplementedError("Name conflict resolution not implemented yet")
else:
raise HTTPError(response, f"Upload file failed with status code: {response.json()}")
return import_object
def get_import(self, import_id, group_id=None):
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
url = f'{self.base_url}{groups_part}{self.imports_snippet}/{import_id}'
headers = self.client.auth_header
response = requests.get(url, headers=headers)
# 200 OK
if response.status_code == 200:
import_object = self.import_from_response(response)
else:
raise HTTPError(response, f"Get import failed with status code: {response.json()}")
return import_object
def get_imports(self, group_id=None):
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
url = f'{self.base_url}{groups_part}{self.imports_snippet}'
headers = self.client.auth_header
response = requests.get(url, headers=headers)
# 200 OK
if response.status_code == 200:
import_object = self.imports_from_response(response)
else:
raise HTTPError(response, f"Get imports failed with status code: {response.json()}")
return import_object
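# --- Illustrative usage sketch (not part of the original module) ---
# How this wrapper is typically driven from a client object; the client
# construction, workspace id, file name and conflict policy below are
# assumptions for demonstration only.
#
#   imports = Imports(client)
#   new_import = imports.upload_file('report.pbix', 'My Report',
#                                    nameconflict='CreateOrOverwrite',
#                                    group_id='<workspace-id>')
#   all_imports = imports.get_imports(group_id='<workspace-id>')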
|
tests/__init__.py | robin-173/sayn | 105 | 12694093 | from contextlib import contextmanager
import os
from pathlib import Path
import subprocess
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from ruamel.yaml import YAML
from sayn.database.creator import create as create_db
@contextmanager
def inside_dir(dirpath, fs=dict()):
"""
Execute code from inside the given directory
:param dirpath: String, path of the directory the command is being run.
"""
old_path = os.getcwd()
try:
os.chdir(dirpath)
for filepath, content in fs.items():
fpath = Path(filepath)
fpath.parent.mkdir(parents=True, exist_ok=True)
fpath.write_text(content)
yield
finally:
os.chdir(old_path)
@contextmanager
def create_project(dirpath, settings=None, project=None, groups=dict(), env=dict()):
"""
Execute code from inside the given directory, creating the sayn project files
:param settings: String, yaml for a settings.yaml file
:param project: String, yaml for a project.yaml file
:param groups: Dict, dict of yaml for the contents of the tasks folder
"""
old_path = os.getcwd()
try:
os.chdir(dirpath)
if settings is not None:
Path(dirpath, "settings.yaml").write_text(settings)
if project is not None:
Path(dirpath, "project.yaml").write_text(project)
if len(groups) > 0:
for name, group in groups.items():
Path(dirpath, f"{name}.yaml").write_text(group)
if len(env) > 0:
os.environ.update(env)
yield
finally:
os.chdir(old_path)
for k in env.keys():
del os.environ[k]
def run_sayn(*args):
return subprocess.check_output(
f"sayn {' '.join(args)}", shell=True, stderr=subprocess.STDOUT
)
# Task Simulators
# create empty tracker class to enable the run to go through
class VoidTracker:
def set_run_steps(self, steps):
pass
def start_step(self, step):
pass
def finish_current_step(self):
pass
vd = VoidTracker()
def simulate_task(
task, source_db=None, target_db=None, run_arguments=dict(), task_params=dict()
):
task.name = "test_task" # set for compilation output during run
task.group = "test_group" # set for compilation output during run
task.run_arguments = {
"folders": {"sql": "sql", "compile": "compile"},
"command": "run",
"debug": False,
"full_load": False,
**run_arguments,
}
if target_db is not None:
task.connections = {
"target_db": create_db("target_db", "target_db", target_db.copy())
}
if source_db is not None:
task.connections.update(
{"source_db": create_db("source_db", "source_db", source_db.copy())}
)
task._default_db = "target_db"
task.tracker = vd
task.jinja_env = Environment(
loader=FileSystemLoader(os.getcwd()),
undefined=StrictUndefined,
keep_trailing_newline=True,
)
task.jinja_env.globals.update(**task_params)
def validate_table(db, table_name, expected_data):
result = db.read_data(f"select * from {table_name}")
if len(result) != len(expected_data):
return False
result = sorted(result, key=lambda x: list(x.values()))
expected_data = sorted(expected_data, key=lambda x: list(x.values()))
for i in range(len(result)):
if result[i] != expected_data[i]:
return False
return True
@contextmanager
def tables_with_data(db, tables, extra_tables=list()):
tables_to_delete = extra_tables.copy()
for table, data in tables.items():
if isinstance(table, tuple):
schema = table[0]
table = table[1]
tables_to_delete.append(f"{schema}.{table}")
else:
schema = None
tables_to_delete.append(table)
db.load_data(table, data, schema=schema, replace=True)
try:
yield
finally:
clear_tables(db, tables_to_delete)
def clear_tables(db, tables):
for table in tables:
try:
db.execute(f"DROP TABLE IF EXISTS {table}")
except:
pass
try:
db.execute(f"DROP VIEW IF EXISTS {table}")
except:
pass
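# --- Illustrative usage sketch (not part of the original helpers) ---
# Example of composing tables_with_data and validate_table inside a test; the
# database fixture name and table contents are made up for demonstration.
#
#   def test_example(target_db):
#       with tables_with_data(target_db, {"numbers": [{"x": 1}, {"x": 2}]}):
#           assert validate_table(target_db, "numbers", [{"x": 1}, {"x": 2}])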
|
packages/syft/src/syft/core/tensor/smpc/mpc_tensor_ancestor.py | vishalbelsare/PySyft | 8,428 | 12694101 | <filename>packages/syft/src/syft/core/tensor/smpc/mpc_tensor_ancestor.py
# stdlib
from typing import Any
from typing import List
# relative
from ..manager import TensorChainManager
from .mpc_tensor import MPCTensor
from .utils import ispointer
class MPCTensorAncestor(TensorChainManager):
def share(self, *parties: List[Any]) -> MPCTensor:
# relative
from .mpc_tensor import MPCTensor
if ispointer(self.child):
raise ValueError(
"Cannot call share on a remote tensor. Use MPCTensor(remote_secret)"
)
return MPCTensor(secret=self.child, parties=list(parties))
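# --- Illustrative usage sketch (not part of the original module) ---
# Rough example of secret-sharing a local tensor across parties; the tensor
# object and party clients below are assumptions for demonstration only.
#
#   mpc_tensor = some_local_syft_tensor.share(alice_client, bob_client)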
|
glad/lang/volt/loader/gl.py | solarane/glad | 2,592 | 12694134 | <filename>glad/lang/volt/loader/gl.py
from glad.lang.common.loader import BaseLoader
from glad.lang.volt.loader import LOAD_OPENGL_DLL
from glad.lang.d.loader.gl import _OPENGL_HAS_EXT as _D_OPENGL_HAS_EXT
_OPENGL_LOADER = \
LOAD_OPENGL_DLL % {'pre':'private', 'init':'open_gl',
'proc':'get_proc', 'terminate':'close_gl'} + '''
bool gladLoadGL() {
StructToDg structToDg;
structToDg.func = cast(void*)get_proc;
auto dg = *cast(Loader*)&structToDg;
bool status = false;
if(open_gl()) {
status = gladLoadGL(dg);
close_gl();
}
return status;
}
'''
_OPENGL_HAS_EXT = (
'global int GL_MAJOR = 0;\nglobal int GL_MINOR = 0;' +
'\n'.join(l for l in _D_OPENGL_HAS_EXT.replace('@nogc', '').splitlines() if 'struct' not in l)
.replace('GLVersion.major', 'GL_MAJOR') +
'\n\n'
)
class OpenGLVoltLoader(BaseLoader):
def write_header_end(self, fobj):
pass
def write_header(self, fobj):
pass
def write(self, fobj):
fobj.write('import watt.library;\n')
if not self.disabled and 'gl' in self.apis:
fobj.write(_OPENGL_LOADER)
def write_begin_load(self, fobj):
fobj.write('\tglGetString = cast(typeof(glGetString))load("glGetString");\n')
fobj.write('\tif(glGetString is null) { return false; }\n')
fobj.write('\tif(glGetString(GL_VERSION) is null) { return false; }\n\n')
def write_end_load(self, fobj):
fobj.write('\treturn GL_MAJOR != 0 || GL_MINOR != 0;\n')
def write_find_core(self, fobj):
fobj.write('\tconst(char)* v = cast(const(char)*)glGetString(GL_VERSION);\n')
fobj.write('\tint major = v[0] - \'0\';\n')
fobj.write('\tint minor = v[2] - \'0\';\n')
fobj.write('\tGL_MAJOR = major; GL_MINOR = minor;\n')
def write_has_ext(self, fobj):
fobj.write(_OPENGL_HAS_EXT) |
builder/compat.py | f1oat/platform-atmelmegaavr | 515 | 12694215 | <filename>builder/compat.py
# Copyright 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from SCons.Script import AlwaysBuild, Import
Import("env")
# Added in PIO Core 4.4.0
if not hasattr(env, "AddPlatformTarget"):
def AddPlatformTarget(
env,
name,
dependencies,
actions,
title=None,
description=None,
always_build=True,
):
target = env.Alias(name, dependencies, actions)
if always_build:
AlwaysBuild(target)
return target
env.AddMethod(AddPlatformTarget)
|
tests/pytests/unit/states/virt/test_helpers.py | babs/salt | 9,425 | 12694220 | from tests.support.mock import call
def network_update_call(
name,
bridge,
forward,
vport=None,
tag=None,
ipv4_config=None,
ipv6_config=None,
connection=None,
username=None,
password=<PASSWORD>,
mtu=None,
domain=None,
nat=None,
interfaces=None,
addresses=None,
physical_function=None,
dns=None,
test=False,
):
"""
Create a call object with the missing default parameters from virt.network_update()
"""
return call(
name,
bridge,
forward,
vport=vport,
tag=tag,
ipv4_config=ipv4_config,
ipv6_config=ipv6_config,
mtu=mtu,
domain=domain,
nat=nat,
interfaces=interfaces,
addresses=addresses,
physical_function=physical_function,
dns=dns,
test=test,
connection=connection,
username=username,
password=password,
)
def domain_update_call(
name,
cpu=None,
mem=None,
disk_profile=None,
disks=None,
nic_profile=None,
interfaces=None,
graphics=None,
connection=None,
username=None,
password=<PASSWORD>,
boot=None,
numatune=None,
boot_dev=None,
hypervisor_features=None,
clock=None,
serials=None,
consoles=None,
stop_on_reboot=False,
live=True,
host_devices=None,
test=False,
):
"""
Create a call object with the missing default parameters from virt.update()
"""
return call(
name,
cpu=cpu,
mem=mem,
disk_profile=disk_profile,
disks=disks,
nic_profile=nic_profile,
interfaces=interfaces,
graphics=graphics,
live=live,
connection=connection,
username=username,
password=password,
boot=boot,
numatune=numatune,
serials=serials,
consoles=consoles,
test=test,
boot_dev=boot_dev,
hypervisor_features=hypervisor_features,
clock=clock,
stop_on_reboot=stop_on_reboot,
host_devices=host_devices,
)
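# --- Illustrative usage sketch (not part of the original helpers) ---
# These factories build `call` objects for mock assertions in virt state tests;
# the mock object and argument values below are assumptions for demonstration.
#
#   virt_mock.network_update.assert_has_calls(
#       [network_update_call("net0", "br0", "bridge", vport="openvswitch", test=True)]
#   )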
|
tests/views/test_meta_static_pages.py | priyanshu-kumar02/personfinder | 561 | 12694225 | # encoding: utf-8
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import view_tests_base
class MetaStaticPagesViewTests(view_tests_base.ViewTestsBase):
def setUp(self):
super(MetaStaticPagesViewTests, self).setUp()
self.data_generator.repo()
def test_get_homepage(self):
resp = self.client.get('/', secure=True)
self.assertTrue('You are now running Person Finder.' in resp.content)
# Legacy path for the homepage.
resp = self.client.get('/global/home.html', secure=True)
self.assertTrue('You are now running Person Finder.' in resp.content)
def test_get_responders_page(self):
resp = self.client.get('/global/responders.html', secure=True)
self.assertTrue('Information for responders' in resp.content)
resp = self.client.get('/global/responders.html?lang=ja', secure=True)
self.assertTrue('災害対応者向け情報' in resp.content)
def test_get_howto_page(self):
resp = self.client.get('/global/howto.html', secure=True)
self.assertTrue('from a PC or mobile phone.' in resp.content)
resp = self.client.get('/global/howto.html?lang=ja', secure=True)
self.assertTrue('自分の安否を伝える' in resp.content)
|
locobot/agent/perception/perception_helpers.py | kandluis/droidlet | 669 | 12694231 | <filename>locobot/agent/perception/perception_helpers.py<gh_stars>100-1000
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import colorsys
import random
import numpy as np
import base64
import io
import torch
import torchvision
import webcolors
from PIL import Image, ImageDraw
from torchvision import transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from collections import defaultdict
def random_colors(N, bright=True):
"""Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
colors = random_colors(20)
def get_random_color():
return colors[0]
def get_encoded_image(file):
with open(file, "rb") as image:
f = base64.b64encode(image.read())
return f
def get_decoded_image(fstr):
image = base64.b64decode(fstr)
return Image.open(io.BytesIO(image)).convert("RGB")
def get_np_decoded_image(enc):
a = base64.decodebytes(enc)
b = np.frombuffer(a, dtype=np.uint8)
print("decoded pros {}, {}".format(type(b), b.shape))
return Image.fromarray(b, "RGB")
def draw_xyz(img, xyz, cent):
d = ImageDraw.Draw(img)
d.text(cent, str(xyz[0]) + ",\n" + str(xyz[1]) + ",\n" + str(xyz[2]), fill=(255, 255, 255))
d.point(cent, fill=(255, 0, 0))
return img
def get_closest_color_name(requested_colour):
min_colours = {}
for key, name in webcolors.CSS3_HEX_TO_NAMES.items():
r_c, g_c, b_c = webcolors.hex_to_rgb(key)
rd = (r_c - requested_colour[0]) ** 2
gd = (g_c - requested_colour[1]) ** 2
bd = (b_c - requested_colour[2]) ** 2
min_colours[(rd + gd + bd)] = name
return min_colours[min(min_colours.keys())]
def get_color_tag(img, cent):
x = int(cent[0])
y = int(cent[1])
color = get_closest_color_name(img.getpixel((x, y)))
return color
def get_coords(masked, img, xyz, centers):
# decode
# xyz is in row-major, centers corresponds to the xy for the image (which is in column major)
# xyz = base64.decodebytes(enc_xyz)
# coords = np.frombuffer(xyz, dtype=np.float64).reshape((4,-1))
coords = np.around(xyz, decimals=2)
# print("Decode success ? {} \n{}".format(coords.shape, coords[:, :5]))
# print("Image size {}".format(img.size))
# map each x,y to a an index into coords
# img = draw_xyz(img, [0,0,0], (0,0))
marker_dict = defaultdict(int)
id = 4
for cent in centers:
xyz = coords[:3, cent[1] * img.size[0] + cent[0]] # what should this index be
# xyz = coords[:3, cent[0]*img.size[1] + cent[1]]
# what should this index be
# xyz = [xyz[1], xyz[0], xyz[2]]
marker_dict[id] = {
"position": [xyz[0], xyz[1], xyz[2]],
"color": get_closest_color_name(img.getpixel((int(cent[0]), int(cent[1])))),
}
masked = draw_xyz(masked, xyz, cent)
id += 1
# draw the coords on the img
return masked, marker_dict
|
Python/Interfacing_C_C++_Fortran/Pybind11/Stats/test.py | Gjacquenot/training-material | 115 | 12694271 | <reponame>Gjacquenot/training-material
#!/usr/bin/env python
import numpy as np
from stats import Statistics, compute_stats
n = 10
data = np.empty(n)
my_stats = Statistics()
for i in range(n):
value = 0.3*i
my_stats.add(value)
data[i] = value
print(my_stats.mean())
my_stats = compute_stats(data)
print(my_stats.mean())
|
webCrawler_scrapy/pipelines.py | lawlite19/PythonCrawler-Scrapy-Mysql-File-Template | 197 | 12694282 | <filename>webCrawler_scrapy/pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from twisted.enterprise import adbapi
import MySQLdb
import MySQLdb.cursors
import codecs
import json
from logging import log
class JsonWithEncodingPipeline(object):
    '''Pipeline class for saving items to a file.
    1. Configure it in settings.py.
    2. yield item in your own spider class and it will be invoked automatically.'''
    def __init__(self):
        self.file = codecs.open('info.json', 'w', encoding='utf-8')  # save as a JSON file
    def process_item(self, item, spider):
        line = json.dumps(dict(item)) + "\n"  # serialize the item to JSON
        self.file.write(line)  # write it to the file
        return item
    def spider_closed(self, spider):  # close the file when the spider finishes
        self.file.close()
class WebcrawlerScrapyPipeline(object):
    '''Pipeline class for saving items to the database.
    1. Configure it in settings.py.
    2. yield item in your own spider class and it will be invoked automatically.'''
    def __init__(self, dbpool):
        self.dbpool = dbpool
        ''' The commented-out code below connects the thread pool with values hard-coded
        in the source; reading them from the settings file is more flexible.
        self.dbpool=adbapi.ConnectionPool('MySQLdb',
                                          host='127.0.0.1',
                                          db='crawlpicturesdb',
                                          user='root',
                                          passwd='<PASSWORD>',
                                          cursorclass=MySQLdb.cursors.DictCursor,
                                          charset='utf8',
                                          use_unicode=False)'''
    @classmethod
    def from_settings(cls, settings):
        '''1. @classmethod declares a class method, as opposed to the usual instance methods.
        2. The first parameter of a class method is cls (short for "class", i.e. the class itself),
           while the first parameter of an instance method is self, an instance of the class.
        3. It can be called on the class itself, e.g. C.f(), similar to a static method in Java.'''
        dbparams = dict(
            host=settings['MYSQL_HOST'],  # read the configuration from settings
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['<PASSWORD>_PASSWD'],
            charset='utf8',  # the charset must be set, otherwise Chinese text may come out garbled
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=False,
        )
        dbpool = adbapi.ConnectionPool('MySQLdb', **dbparams)  # ** expands the dict into keyword arguments, i.e. host=xxx, db=yyy, ...
        return cls(dbpool)  # equivalent to handing dbpool to this class; it is then available on self
    # called by the pipeline machinery for every item
    def process_item(self, item, spider):
        query = self.dbpool.runInteraction(self._conditional_insert, item)  # schedule the insert
        query.addErrback(self._handle_error, item, spider)  # attach the error handler
        return item
    # write the item into the database
    def _conditional_insert(self, tx, item):
        # print item['name']
        sql = "insert into testtable(name,url) values(%s,%s)"
        params = (item["name"], item["url"])
        tx.execute(sql, params)
    # error handling method
def _handle_error(self, failue, item, spider):
print '--------------database operation exception!!-----------------'
print '-------------------------------------------------------------'
print failue |
scripts/gt_mots_eval.py | zhuhu00/MOTSFusion_modify | 154 | 12694293 | <reponame>zhuhu00/MOTSFusion_modify
from file_io.import_utils import import_detections, import_segmentations
from file_io.export_utils import export_tracking_result_in_mots_format
from eval.mots_eval.eval import run_mots_eval
from eval.mots_eval.mots_common.io import load_seqmap
from config import Config
from tracker.tracked_sequence import TrackedSequence
import pycocotools.mask as cocomask
from eval.mots_eval.mots_common.io import load_txt
def import_gt_file(gt_path):
objects_per_frame = load_txt(gt_path)
print(sequence)
for frame in objects_per_frame.keys():
for object in objects_per_frame.get(frame):
if not object.track_id == 10000:
track_id = (object.track_id % 1000)
det = {'class': object.class_id}
while tracks_gt.get_num_ids() <= track_id:
tracks_gt.add_empty_track()
tracks_gt.add_to_track(frame, track_id, det, object.mask)
if __name__ == '__main__':
config = Config('./configs/config_default')
list_sequences, max_frames = load_seqmap(config.str('mots_seqmap_file'))
for sequence in list_sequences:
tracks_gt = TrackedSequence(max_frames[sequence]+1)
import_gt_file('./data/mots_gt/' + sequence + '.txt')
raw_detections = import_detections(config, sequence)
segmentations = import_segmentations(config, sequence)
tracks_gt_seg = TrackedSequence(max_frames[sequence]+1)
while max_frames[sequence]+1 > len(raw_detections):
raw_detections.append([])
while max_frames[sequence]+1 > len(segmentations):
segmentations.append([])
for step in range(tracks_gt.timesteps):
combined_mask_per_frame = {}
for gt_id in tracks_gt.get_active_tracks(step):
ref_mask = tracks_gt.get_mask(step, gt_id, decode=False)
ref_det = tracks_gt.get_detection(step, gt_id)
ref_class = ref_det['class']
for mask, det in zip(segmentations[step], raw_detections[step]):
# mask based (MOTS)
mask_iou = cocomask.area(cocomask.merge([mask, ref_mask], intersect=True)) / cocomask.area(cocomask.merge([mask, ref_mask]))
if mask_iou > 0.5:
while tracks_gt_seg.get_num_ids() <= gt_id:
tracks_gt_seg.add_empty_track()
tracks_gt_seg.add_to_track(step, gt_id, det, mask)
if step not in combined_mask_per_frame:
combined_mask_per_frame[step] = mask
else:
combined_mask_per_frame[step] = cocomask.merge([combined_mask_per_frame[step], mask],
intersect=False)
tracks_gt_seg.fix_mask_overlap()
export_tracking_result_in_mots_format(tracks_gt_seg, './scripts/gt_mots_eval/' + sequence + '/')
run_mots_eval('./scripts/gt_mots_eval/', list_sequences, config.dir('mots_gt_folder'), config.str('mots_seqmap_file'))
|
torch/testing/_internal/opinfo_helper.py | vuanvin/pytorch | 183 | 12694295 | <reponame>vuanvin/pytorch<filename>torch/testing/_internal/opinfo_helper.py
import collections
import warnings
from functools import partial
import torch
from torch.testing._internal.common_cuda import (TEST_CUDA)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
all_types_and_complex,
all_types_and_half,
all_types,
complex_types,
floating_and_complex_types,
floating_types_and_half,
floating_types,
integral_types,
floating_types_and,
floating_and_complex_types_and,
integral_types_and,
all_types_and,
_dispatch_dtypes,
)
COMPLETE_DTYPES_DISPATCH = (
all_types,
all_types_and_complex,
all_types_and_half,
floating_types,
floating_and_complex_types,
floating_types_and_half,
integral_types,
complex_types,
)
EXTENSIBLE_DTYPE_DISPATCH = (
all_types_and_complex_and,
floating_types_and,
floating_and_complex_types_and,
integral_types_and,
all_types_and,
)
# Better way to acquire devices?
DEVICES = ['cpu'] + (['cuda'] if TEST_CUDA else [])
class _dynamic_dispatch_dtypes(_dispatch_dtypes):
# Class to tag the dynamically generated types.
pass
def get_supported_dtypes(op, sample_inputs_fn, device_type):
# Returns the supported dtypes for the given operator and device_type pair.
assert device_type in ['cpu', 'cuda']
if not TEST_CUDA and device_type == 'cuda':
warnings.warn("WARNING: CUDA is not available, empty_dtypes dispatch will be returned!")
return _dynamic_dispatch_dtypes(())
supported_dtypes = set()
for dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half):
try:
samples = sample_inputs_fn(op, device_type, dtype, False)
except RuntimeError:
# If `sample_inputs_fn` doesn't support sampling for a given
# `dtype`, we assume that the `dtype` is not supported.
# We raise a warning, so that user knows that this was the case
# and can investigate if there was an issue with the `sample_inputs_fn`.
warnings.warn(f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}")
continue
# We assume the dtype is supported
# only if all samples pass for the given dtype.
supported = True
for sample in samples:
try:
op(sample.input, *sample.args, **sample.kwargs)
except RuntimeError as re:
# dtype is not supported
supported = False
break
if supported:
supported_dtypes.add(dtype)
return _dynamic_dispatch_dtypes(supported_dtypes)
def dtypes_dispatch_hint(dtypes):
# Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH and EXTENSIBLE_DTYPE_DISPATCH)
# and its string representation for the passed `dtypes`.
return_type = collections.namedtuple('return_type', 'dispatch_fn dispatch_fn_str')
# CUDA is not available, dtypes will be empty.
if len(dtypes) == 0:
return return_type((), str(tuple()))
set_dtypes = set(dtypes)
for dispatch in COMPLETE_DTYPES_DISPATCH:
# Short circuit if we get an exact match.
if set(dispatch()) == set_dtypes:
return return_type(dispatch, dispatch.__name__ + "()")
chosen_dispatch = None
chosen_dispatch_score = 0.
for dispatch in EXTENSIBLE_DTYPE_DISPATCH:
dispatch_dtypes = set(dispatch())
if not dispatch_dtypes.issubset(set_dtypes):
continue
score = len(dispatch_dtypes)
if score > chosen_dispatch_score:
chosen_dispatch_score = score
chosen_dispatch = dispatch
# If user passed dtypes which are lower than the lowest
# dispatch type available (not likely but possible in code path).
if chosen_dispatch is None:
return return_type((), str(dtypes))
return return_type(partial(dispatch, *tuple(set(dtypes) - set(dispatch()))),
dispatch.__name__ + str(tuple(set(dtypes) - set(dispatch()))))
def is_dynamic_dtype_set(op):
# Detect if the OpInfo entry acquired dtypes dynamically
# using `get_supported_dtypes`.
return op.dynamic_dtypes
def str_format_dynamic_dtype(op):
fmt_str = """
OpInfo({name},
dtypes={dtypes},
dtypesIfCUDA={dtypesIfCUDA},
)
""".format(name=op.name,
dtypes=dtypes_dispatch_hint(op.dtypes).dispatch_fn_str,
dtypesIfCUDA=dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str)
return fmt_str
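if __name__ == "__main__":
    # Minimal illustration (assumed ad-hoc entry point, not used by the test
    # suite): condense an explicit dtype set into one of the dispatch helpers.
    hint = dtypes_dispatch_hint({torch.float16, torch.float32, torch.float64})
    print(hint.dispatch_fn_str)  # expected: "floating_types_and_half()"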
|
docs/conf.py | parth-choudhary/drum | 265 | 12694302 | <reponame>parth-choudhary/drum
from __future__ import unicode_literals
# This file is automatically generated via sphinx-me
from sphinx_me import setup_conf; setup_conf(globals())
|
cointrol/core/models.py | fakegit/cointrol | 967 | 12694320 | <gh_stars>100-1000
from collections import OrderedDict
from django.db import models
from django.db.models import Sum
from django.db.models.signals import post_save
from django.utils import timezone
from django.dispatch import receiver
from django.contrib.auth.models import AbstractUser
from django.utils.functional import cached_property
from .castable import CastableModel
from .fields import PriceField, AmountField, PercentField
###############################################################################
# User models
###############################################################################
class User(AbstractUser):
"""Cointrol user"""
class Meta:
db_table = 'user'
@property
def account(self):
# TODO: support multiple Bitstamp accounts per user
return self.accounts.get()
class Account(models.Model):
"""Bitstamp account"""
user = models.ForeignKey(User, related_name='accounts')
username = models.CharField(max_length=255, blank=True, help_text='Bitstamp login number')
api_key = models.CharField(max_length=255, blank=True)
api_secret = models.CharField(max_length=255, blank=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'account'
def __str__(self):
return 'account for {}'.format(self.user)
def get_active_trading_session(self):
"""
Return the current `ACTIVE` and unfinished `TradingSession`, or `None`.
This is the exclusive method to get the session. It has
side-effects in the form of changing status from
`ACTIVE` to `FINISHED`, and from `QUEUED` to `ACTIVE` before
return a session.
"""
ACTIVE, QUEUED, FINISHED = (TradingSession.ACTIVE,
TradingSession.QUEUED,
TradingSession.FINISHED)
try:
session = self.trading_sessions.get(status=ACTIVE)
except TradingSession.DoesNotExist:
try:
session = self.trading_sessions\
.filter(status=QUEUED)\
.earliest()
except TradingSession.DoesNotExist:
session = None
while session:
if session.status == FINISHED:
session = None
elif session.status == QUEUED:
session.set_status(ACTIVE)
elif session.status == ACTIVE:
if not session.is_finished():
return session
else:
session.set_status(FINISHED)
try:
session = session.get_previous_by_created(account=self)
except TradingSession.DoesNotExist:
session = None
else:
raise TypeError('invalid session status: pk={}, {!r}'.format(
session.pk, session.status))
###############################################################################
# Trading
###############################################################################
class TradingStrategyProfile(CastableModel):
"""Base trading strategy configuration models class."""
note = models.CharField(max_length=255, blank=True)
account = models.ForeignKey(Account)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@property
def type_name(self):
if type(self) == TradingStrategyProfile:
return self.cast().type_name
return type(self).__name__
def __str__(self):
return str(self.cast())
class RelativeStrategyProfile(TradingStrategyProfile):
"""Configuration for relative trading strategy."""
buy = PercentField()
sell = PercentField()
class Meta:
db_table = 'strategy_profile_relative'
def __str__(self):
return 'relative buy at {buy}%, sell at ${sell}%'.format(
buy=self.buy, sell=self.sell)
def save(self, *args, **kwargs):
min_fee = .2
assert self.buy < 100 - min_fee
assert self.sell > 100 + min_fee
return super().save(*args, **kwargs)
class FixedStrategyProfile(TradingStrategyProfile):
"""Configuration for fixed trading strategy."""
buy = PriceField()
sell = PriceField()
class Meta:
db_table = 'strategy_profile_fixed'
def __str__(self):
return 'fixed buy at ${buy}, sell at ${sell}'.format(
buy=self.buy, sell=self.sell)
class TradingSession(models.Model):
QUEUED, ACTIVE, FINISHED = 'queued', 'active', 'finished'
STATUSES = [QUEUED, ACTIVE, FINISHED]
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
account = models.ForeignKey(Account, related_name='trading_sessions')
status = models.CharField(
choices=zip(STATUSES, STATUSES),
max_length=255,
db_index=True
)
became_active = models.DateTimeField(null=True, blank=True)
became_finished = models.DateTimeField(null=True, blank=True)
note = models.CharField(max_length=255, blank=True)
strategy_profile = models.ForeignKey(TradingStrategyProfile)
# None - no limit; 1 - one repeat left; 0 - done
repeat_times = models.PositiveSmallIntegerField(default=None,
null=True,
blank=True)
# None - no limit
repeat_until = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = 'trading_session'
ordering = ['-created']
get_latest_by = 'created'
def __str__(self):
return '{status} session with {strategy}'.format(
status=self.status,
strategy=self.strategy_profile,
)
def set_status(self, status):
if status == self.ACTIVE:
assert self.status == self.QUEUED
assert self.became_active is None
assert self.became_finished is None
self.became_active = timezone.now()
elif status == self.FINISHED:
assert self.status == self.ACTIVE
assert self.became_active is not None
assert self.became_finished is None
self.became_finished = timezone.now()
self.status = status
self.save()
@cached_property
def profile(self):
"""Accessor for casted strategy profile."""
return self.strategy_profile.cast()
def is_expired(self):
return (self.repeat_until is not None
and self.repeat_until > timezone.now())
def is_done(self):
return (self.repeat_times is not None
and self.repeat_times >= self.orders.count())
def is_finished(self):
return self.is_expired() or self.is_done()
###############################################################################
# Bitstamp API-based models
# https://www.bitstamp.net/api/
###############################################################################
class Ticker(models.Model):
"""
{
high: "704.00",
last: "678.57",
timestamp: "1393958158",
bid: "678.49",
vwap: "677.88",
volume: "39060.90623024",
low: "633.64",
ask: "678.57"
}
"""
timestamp = models.DateTimeField()
volume = AmountField()
vwap = PriceField()
last = PriceField()
high = PriceField()
low = PriceField()
bid = PriceField()
ask = PriceField()
open = PriceField()
class Meta:
ordering = ['-timestamp']
get_latest_by = 'timestamp'
db_table = 'bitstamp_ticker'
def __str__(self):
return 'last={last}, timestamp={timestamp}'.format(**self.__dict__)
class Balance(models.Model):
"""
usd_balance - USD balance
btc_balance - BTC balance
usd_reserved - USD reserved in open orders
btc_reserved - BTC reserved in open orders
usd_available- USD available for trading
btc_available - BTC available for trading
fee - customer trading fee
"""
created = models.DateTimeField(auto_now_add=True)
account = models.ForeignKey(Account, related_name='balances')
inferred = models.BooleanField(default=False)
timestamp = models.DateTimeField()
# API fields
fee = PercentField()
usd_balance = AmountField()
btc_balance = AmountField()
usd_reserved = AmountField()
btc_reserved = AmountField()
btc_available = AmountField()
usd_available = AmountField()
eur_balance = AmountField()
xrp_balance = AmountField()
eur_reserved = AmountField()
xrp_reserved = AmountField()
eur_available = AmountField()
xrp_available = AmountField()
class Meta:
get_latest_by = 'timestamp'
ordering = ['-timestamp']
db_table = 'bitstamp_balance'
def __str__(self):
return '{usd:0>6} US$ | {btc:0>10} BTC'.format(
usd=self.usd_balance,
btc=self.btc_balance
)
class Order(models.Model):
OPEN, CANCELLED, PROCESSED = 'open', 'cancelled', 'processed'
STATUSES = [OPEN, CANCELLED, PROCESSED]
BUY, SELL = 0, 1
TYPES = OrderedDict([(BUY, 'buy'),
(SELL, 'sell')])
updated = models.DateTimeField(auto_now=True)
account = models.ForeignKey(Account, related_name='orders')
balance = models.ForeignKey(Balance, null=True, on_delete=models.PROTECT)
total = AmountField()
status = models.CharField(
default=None,
choices=zip(STATUSES, STATUSES),
max_length=255,
db_index=True
)
status_changed = models.DateTimeField(null=True, blank=True)
trading_session = models.ForeignKey(
TradingSession,
null=True,
on_delete=models.SET_NULL,
related_name='orders'
)
# API fields.
price = PriceField()
amount = AmountField()
type = models.IntegerField(choices=[(BUY, 'buy'), (SELL, 'sell')],
db_index=True)
datetime = models.DateTimeField()
def __str__(self):
return '{type} {amount} BTC at {price} US$'.format(
type=self.get_type_display(),
amount=self.amount,
price=self.price
)
class Meta:
ordering = ['-datetime']
get_latest_by = 'datetime'
db_table = 'bitstamp_order'
class Transaction(models.Model):
DEPOSIT, WITHDRAWAL, MARKET_TRADE = 0, 1, 2
TYPES = [DEPOSIT, WITHDRAWAL, MARKET_TRADE]
# MARKET_TRADE subtypes
SELL, BUY = 'sell', 'buy'
balance = models.ForeignKey(Balance, on_delete=models.PROTECT)
account = models.ForeignKey(Account, related_name='transactions')
updated = models.DateTimeField(auto_now=True)
# API fields.
datetime = models.DateTimeField()
btc = AmountField()
usd = AmountField()
fee = AmountField()
btc_usd = PriceField()
order = models.ForeignKey(Order, related_name='transactions', null=True)
type = models.PositiveSmallIntegerField(
db_index=True,
choices=[
(DEPOSIT, 'deposit'),
(WITHDRAWAL, 'withdrawal'),
(MARKET_TRADE, 'trade'),
]
)
class Meta:
ordering = ['-datetime']
get_latest_by = 'datetime'
db_table = 'bitstamp_transaction'
def __str__(self):
return '${usd} | {btc} BTC'.format(usd=self.usd, btc=self.btc)
@property
def trade_type(self):
if self.type == Transaction.MARKET_TRADE:
return Transaction.SELL if self.usd > 0 else Transaction.BUY
def save(self, *args, **kwargs):
if not self.balance_id:
self._create_balance()
return super().save(*args, **kwargs)
def _create_balance(self):
assert not self.balance_id
older = self.account.transactions.filter(datetime__lte=self.datetime)
aggregate = (
{'usd': 0, 'btc': 0, 'fee': 0}
if not older.exists() else
older.aggregate(usd=Sum('usd'), btc=Sum('btc'), fee=Sum('fee'))
)
# Reflect current transaction as well.
aggregate['usd'] += self.usd
aggregate['fee'] += self.fee
aggregate['btc'] += self.btc
self.balance = self.account.balances.create(
inferred=True,
timestamp=self.datetime,
usd_balance=aggregate['usd'] - aggregate['fee'],
btc_balance=aggregate['btc'],
fee=0,
)
###############################################################################
# Signal listeners
###############################################################################
# noinspection PyUnusedLocal
@receiver(post_save, sender=User)
def create_default_account(instance, created, **kwargs):
if created:
instance.accounts.create()
|
Chapter08/adding_legend_and_annotations.py | carltyndall/Python-Automation-Cookbook-Second-Edition | 155 | 12694369 | <gh_stars>100-1000
import matplotlib.pyplot as plt
# STEP 2
LEGEND = ('ProductA', 'ProductB', 'ProductC')
DATA = (
('Q1 2017', 100, 30, 3),
('Q2 2017', 105, 32, 15),
('Q3 2017', 125, 29, 40),
('Q4 2017', 115, 31, 80),
)
# STEP 3
POS = list(range(len(DATA)))
VALUESA = [valueA for label, valueA, valueB, valueC in DATA]
VALUESB = [valueB for label, valueA, valueB, valueC in DATA]
VALUESC = [valueC for label, valueA, valueB, valueC in DATA]
LABELS = [label for label, valueA, valueB, valueC in DATA]
# STEP 4
WIDTH = 0.2
valueA = plt.bar([p - WIDTH for p in POS], VALUESA, width=WIDTH)
valueB = plt.bar([p for p in POS], VALUESB, width=WIDTH)
valueC = plt.bar([p + WIDTH for p in POS], VALUESC, width=WIDTH)
plt.ylabel('Sales')
plt.xticks(POS, LABELS)
# STEP 5
plt.annotate('400% growth', xy=(1.2, 18), xytext=(1.3, 40),
horizontalalignment='center',
fontsize=9,
arrowprops={'facecolor': 'black',
'arrowstyle': "fancy",
'connectionstyle': "angle3",
})
# STEP 6
# Draw the legend outside the plot
plt.legend(LEGEND, title='Products', bbox_to_anchor=(1, 0.8))
plt.subplots_adjust(right=0.80)
# STEP 6
plt.show()
|
PhysicsTools/NanoAOD/python/btagWeightTable_cff.py | Purva-Chaudhari/cmssw | 852 | 12694383 | <filename>PhysicsTools/NanoAOD/python/btagWeightTable_cff.py
import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.common_cff import *
from PhysicsTools.NanoAOD.nano_eras_cff import *
btagSFdir="PhysicsTools/NanoAOD/data/btagSF/"
btagWeightTable = cms.EDProducer("BTagSFProducer",
src = cms.InputTag("linkedObjects","jets"),
cut = cms.string("pt > 25. && abs(eta) < 2.5"),
discNames = cms.vstring(
"pfCombinedInclusiveSecondaryVertexV2BJetTags",
"pfDeepCSVJetTags:probb+pfDeepCSVJetTags:probbb", #if multiple MiniAOD branches need to be summed up (e.g., DeepCSV b+bb), separate them using '+' delimiter
"pfCombinedMVAV2BJetTags"
),
discShortNames = cms.vstring(
"CSVV2",
"DeepCSVB",
"CMVA"
),
weightFiles = cms.vstring( #default settings are for 2017 94X. toModify function is called later for other eras.
btagSFdir+"CSVv2_94XSF_V2_B_F.csv",
btagSFdir+"DeepCSV_94XSF_V2_B_F.csv",
"unavailable" #if SFs for an algorithm in an era is unavailable, the corresponding branch will not be stored
),
operatingPoints = cms.vstring("3","3","3"), #loose = 0, medium = 1, tight = 2, reshaping = 3
measurementTypesB = cms.vstring("iterativefit","iterativefit","iterativefit"), #e.g. "comb", "incl", "ttbar", "iterativefit"
measurementTypesC = cms.vstring("iterativefit","iterativefit","iterativefit"),
measurementTypesUDSG = cms.vstring("iterativefit","iterativefit","iterativefit"),
sysTypes = cms.vstring("central","central","central")
)
for modifier in run2_miniAOD_80XLegacy, run2_nanoAOD_94X2016: # to be updated when SF for Summer16MiniAODv3 MC will be available
modifier.toModify(btagWeightTable,
cut = cms.string("pt > 25. && abs(eta) < 2.4"), #80X corresponds to 2016, |eta| < 2.4
weightFiles = cms.vstring( #80X corresponds to 2016 SFs
btagSFdir+"CSVv2_Moriond17_B_H.csv",
"unavailable",
btagSFdir+"cMVAv2_Moriond17_B_H.csv"
)
)
|
textbox/evaluator/meteor_evaluator.py | StevenTang1998/TextBox | 347 | 12694395 | <filename>textbox/evaluator/meteor_evaluator.py
# @Time : 2021/4/19
# @Author : <NAME>
# @Email : <EMAIL>
"""
textbox.evaluator.meteor_evaluator
#######################################
"""
import numpy as np
from nltk.translate.meteor_score import meteor_score
from textbox.evaluator.abstract_evaluator import AbstractEvaluator
class MeteorEvaluator(AbstractEvaluator):
def _preprocess(self, input_sentence):
return " ".join(input_sentence)
def _calc_metrics_info(self, generate_corpus, reference_corpus):
generate_corpus = [self._preprocess(generate_sentence) for generate_sentence in generate_corpus]
reference_corpus = [self._preprocess(reference_sentence) for reference_sentence in reference_corpus]
reference_corpus = [[reference_sentence] for reference_sentence in reference_corpus]
result = {}
scores = []
for gen, refs in zip(generate_corpus, reference_corpus):
score = meteor_score(refs, gen)
scores.append(score)
result['meteor'] = scores
return result
|
cmsplugin_cascade/admin.py | teklager/djangocms-cascade | 139 | 12694400 | <filename>cmsplugin_cascade/admin.py
from urllib.parse import urlparse
import requests
from django.contrib import admin
from django.contrib.sites.shortcuts import get_current_site
from django.forms import Media, widgets
from django.db.models import Q
from django.http import JsonResponse, HttpResponseForbidden, HttpResponseNotFound
from django.urls import re_path, reverse
from django.utils.translation import get_language_from_request
from cms.models.pagemodel import Page
from cms.extensions import PageExtensionAdmin
from cms.utils.page import get_page_from_path
from cmsplugin_cascade.models import CascadePage, IconFont
from cmsplugin_cascade.link.forms import format_page_link
@admin.register(CascadePage)
class CascadePageAdmin(PageExtensionAdmin):
add_form_template = change_form_template = 'cascade/admin/change_form.html'
fields = ['icon_font', 'menu_symbol']
@property
def media(self):
media = super().media
media += Media(css={'all': ['cascade/css/admin/cascadepage.css']},
js=['admin/js/jquery.init.js', 'cascade/js/admin/cascadepage.js'])
return media
def get_form(self, request, obj=None, **kwargs):
options = dict(kwargs, widgets={'menu_symbol': widgets.HiddenInput})
ModelForm = super().get_form(request, obj, **options)
return ModelForm
def get_urls(self):
urls = [
re_path(r'^get_page_sections/$', lambda _: JsonResponse({'element_ids': []}),
name='get_page_sections'), # just to reverse
re_path(r'^get_page_sections/(?P<page_pk>\d+)$',
self.admin_site.admin_view(self.get_page_sections)),
re_path(r'^published_pages/$', self.get_published_pagelist, name='get_published_pagelist'),
re_path(r'^fetch_fonticons/(?P<iconfont_id>[0-9]+)$', self.fetch_fonticons),
re_path(r'^fetch_fonticons/$', self.fetch_fonticons, name='fetch_fonticons'),
re_path(r'^validate_exturl/$', self.validate_exturl, name='validate_exturl'),
]
urls.extend(super().get_urls())
return urls
def get_page_sections(self, request, page_pk=None):
choices = []
try:
extended_glossary = self.model.objects.get(extended_object_id=page_pk).glossary
for key, val in extended_glossary['element_ids'].items():
choices.append((key, val))
except (self.model.DoesNotExist, KeyError):
pass
return JsonResponse({'element_ids': choices})
def get_published_pagelist(self, request, *args, **kwargs):
"""
This view is used by the SearchLinkField as the user types to feed the autocomplete drop-down.
"""
if not request.is_ajax():
return HttpResponseForbidden()
data = {'results': []}
language = get_language_from_request(request)
query_term = request.GET.get('term')
if not query_term:
return JsonResponse(data)
# first, try to resolve by URL if it points to a local resource
parse_result = urlparse(query_term)
if parse_result.netloc.split(':')[0] == request.META['HTTP_HOST'].split(':')[0]:
site = get_current_site(request)
path = parse_result.path.lstrip(reverse('pages-root')).rstrip('/')
page = get_page_from_path(site, path)
if page:
data['results'].append(self.get_result_set(language, page))
return JsonResponse(data)
# otherwise resolve by search term
matching_published_pages = Page.objects.published().public().filter(
Q(title_set__title__icontains=query_term, title_set__language=language)
| Q(title_set__path__icontains=query_term, title_set__language=language)
| Q(title_set__menu_title__icontains=query_term, title_set__language=language)
| Q(title_set__page_title__icontains=query_term, title_set__language=language)
).distinct().order_by('title_set__title').iterator()
for page in matching_published_pages:
data['results'].append(self.get_result_set(language, page))
if len(data['results']) > 15:
break
return JsonResponse(data)
def get_result_set(self, language, page):
title = page.get_title(language=language)
path = page.get_absolute_url(language=language)
return {
'id': page.pk,
'text': format_page_link(title, path),
}
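    # Illustrative response shape for get_published_pagelist (values are hypothetical):
    #   {"results": [{"id": 12, "text": "<formatted page link for /about/>"}, ...]}
    # where each entry is produced by get_result_set() above.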
def fetch_fonticons(self, request, iconfont_id=None):
try:
icon_font = IconFont.objects.get(id=iconfont_id)
except IconFont.DoesNotExist:
return HttpResponseNotFound("IconFont with id={} does not exist".format(iconfont_id))
else:
data = dict(icon_font.config_data)
data.pop('glyphs', None)
data['families'] = icon_font.get_icon_families()
return JsonResponse(data)
def validate_exturl(self, request):
"""
Perform a GET request onto the given external URL and return its status.
"""
exturl = request.GET.get('exturl')
request_headers = {'User-Agent': 'Django-CMS-Cascade'}
try:
response = requests.get(exturl, allow_redirects=True, headers=request_headers)
except Exception:
return JsonResponse({'status_code': 500})
else:
return JsonResponse({'status_code': response.status_code})
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
extra_context = dict(extra_context or {}, icon_fonts=IconFont.objects.all())
return super().changeform_view(
request, object_id=object_id, form_url=form_url, extra_context=extra_context)
|
qt_material/resources/__init__.py | 5yutan5/qt-material | 692 | 12694405 | <reponame>5yutan5/qt-material<gh_stars>100-1000
from .generate import ResourseGenerator, RESOURCES_PATH
|
test/test_extractor.py | Usama0121/flashtext | 5,330 | 12694453 | <reponame>Usama0121/flashtext
from flashtext import KeywordProcessor
import logging
import unittest
import json
logger = logging.getLogger(__name__)
class TestKeywordExtractor(unittest.TestCase):
def setUp(self):
logger.info("Starting...")
with open('test/keyword_extractor_test_cases.json') as f:
self.test_cases = json.load(f)
def tearDown(self):
logger.info("Ending.")
def test_extract_keywords(self):
"""For each of the test case initialize a new KeywordProcessor.
Add the keywords the test case to KeywordProcessor.
Extract keywords and check if they match the expected result for the test case.
"""
for test_id, test_case in enumerate(self.test_cases):
keyword_processor = KeywordProcessor()
keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])
keywords_extracted = keyword_processor.extract_keywords(test_case['sentence'])
self.assertEqual(keywords_extracted, test_case['keywords'],
"keywords_extracted don't match the expected results for test case: {}".format(test_id))
def test_extract_keywords_case_sensitive(self):
"""For each of the test case initialize a new KeywordProcessor.
Add the keywords the test case to KeywordProcessor.
Extract keywords and check if they match the expected result for the test case.
"""
for test_id, test_case in enumerate(self.test_cases):
keyword_processor = KeywordProcessor(case_sensitive=True)
keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])
keywords_extracted = keyword_processor.extract_keywords(test_case['sentence'])
self.assertEqual(keywords_extracted, test_case['keywords_case_sensitive'],
"keywords_extracted don't match the expected results for test case: {}".format(test_id))
if __name__ == '__main__':
unittest.main()
|
aat/core/data/trade.py | mthomascarcamo/aat | 305 | 12694456 | <filename>aat/core/data/trade.py
from datetime import datetime
from typing import Any, Dict, List, Optional, Type, Union
from .cpp import _CPP, _make_cpp_trade
from .order import Order
from ..instrument import Instrument
from ..exchange import ExchangeType
from ...config import DataType, Side
class Trade(object):
__slots__ = [
"__id",
"__type",
"__price",
"__volume",
"__maker_orders",
"__taker_order",
# FIXME hide
"__my_order",
"__slippage",
"__transaction_cost",
]
# for convenience
Types = DataType
def __new__(cls, *args, **kwargs): # type: ignore
if _CPP:
return _make_cpp_trade(*args, **kwargs)
return super(Trade, cls).__new__(cls)
def __init__(
self,
volume: float,
price: float,
taker_order: Order,
maker_orders: Optional[List[Order]] = None,
**kwargs: Any,
) -> None:
self.__id = kwargs.get(
"id", "0"
) # on construction, provide no ID until exchange assigns one
self.__type = DataType.TRADE
assert isinstance(price, (float, int))
assert isinstance(volume, (float, int))
assert isinstance(taker_order, Order)
# assert(len(maker_orders) > 0) # not necessarily
assert volume == taker_order.filled
self.__price = price
self.__volume = volume
self.__maker_orders = maker_orders or []
self.__taker_order = taker_order
self.__my_order = kwargs.get("my_order", None)
self.__slippage = 0.0
self.__transaction_cost = 0.0
# ******** #
# Readonly #
# ******** #
@property
def timestamp(self) -> datetime:
return self.taker_order.timestamp
@property
def type(self) -> DataType:
return self.__type
@property
def volume(self) -> float:
return self.__volume
@property
def price(self) -> float:
return self.__price
@property
def instrument(self) -> Instrument:
return self.taker_order.instrument
@property
def exchange(self) -> ExchangeType:
return self.taker_order.exchange
@property
def side(self) -> Side:
return self.taker_order.side
@property
def notional(self) -> float:
return self.price * self.volume
def finished(self) -> bool:
return self.taker_order.finished()
# ***********#
# Read/write #
# ***********#
@property
def id(self) -> str:
return self.__id
@id.setter
def id(self, id: str) -> None:
assert isinstance(id, (str, int))
self.__id = str(id)
@property
def maker_orders(self) -> List[Order]:
# no setter
return self.__maker_orders
@property
def taker_order(self) -> Order:
return self.__taker_order
@property
def my_order(self) -> Order:
return self.__my_order
@my_order.setter
def my_order(self, order: Order) -> None:
assert isinstance(order, Order)
self.__my_order = order
def __repr__(self) -> str:
return f"Trade( id={self.id}, timestamp={self.timestamp}, {self.volume}@{self.price}, \n\ttaker_order={self.taker_order},\n\tmaker_orders={self.maker_orders}, )"
def __eq__(self, other: object) -> bool:
assert isinstance(other, Trade)
return self.id == other.id and self.timestamp == other.timestamp
def json(self, flat: bool = False) -> Dict[str, Union[str, int, float, dict]]:
"""convert trade to flat json"""
ret: Dict[str, Union[str, int, float, dict]] = {
"id": self.id,
"timestamp": self.timestamp.timestamp(),
"price": self.price,
"volume": self.volume,
}
if flat:
# Typings here to enforce flatness of json
taker_order: Dict[str, Union[str, int, float, dict]] = {
"taker_order." + k: v
for k, v in self.taker_order.json(flat=flat).items()
}
maker_orders: List[Dict[str, Union[str, int, float, dict]]] = [
{"maker_order{}." + k: v for k, v in order.json(flat=flat).items()}
for i, order in enumerate(self.maker_orders)
]
# update with taker order dict
ret.update(taker_order)
# update with maker order dicts
for maker_order in maker_orders:
ret.update(maker_order)
else:
ret["taker_order"] = self.taker_order.json() # type: ignore
ret["maker_orders"] = [m.json() for m in self.maker_orders] # type: ignore
return ret
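    # Illustrative output (hypothetical values): with flat=True the order fields are
    # prefixed, e.g. {"taker_order.id": "7", "maker_order0.id": "5", ...}, while
    # flat=False nests them under "taker_order" and "maker_orders".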
@staticmethod
def fromJson(jsn: dict) -> "Trade":
ret = Trade(
jsn["volume"],
jsn["price"],
Order.fromJson(jsn["taker_order"]),
[Order.fromJson(x) for x in jsn["maker_orders"]],
)
if "id" in jsn:
ret.id = str(jsn.get("id"))
return ret
@staticmethod
def schema() -> Dict[str, Type]:
# FIXME
# this varies from the json schema
return {"id": int, "timestamp": int, "volume": float, "price": float}
|
.evergreen/ocsp/mock_ocsp_responder.py | tanlisu/mongo-php-library | 3,370 | 12694475 | #
# This file has been modified in 2019 by MongoDB Inc.
#
# OCSPBuilder is derived from https://github.com/wbond/ocspbuilder
# OCSPResponder is derived from https://github.com/threema-ch/ocspresponder
# Copyright (c) 2015-2018 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Copyright 2016 Threema GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, division, absolute_import, print_function
import logging
import base64
import inspect
import re
import enum
import sys
import textwrap
from datetime import datetime, timezone, timedelta
from typing import Callable, Tuple, Optional
from asn1crypto import x509, keys, core, ocsp
from asn1crypto.ocsp import OCSPRequest, OCSPResponse
from oscrypto import asymmetric
from flask import Flask, request, Response
__version__ = '0.10.2'
__version_info__ = (0, 10, 2)
logger = logging.getLogger(__name__)
if sys.version_info < (3,):
byte_cls = str
else:
byte_cls = bytes
def _pretty_message(string, *params):
"""
Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string
"""
output = textwrap.dedent(string)
# Unwrap lines, taking into account bulleted lists, ordered lists and
# underlines consisting of = signs
if output.find('\n') != -1:
output = re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])', ' ', output)
if params:
output = output % params
output = output.strip()
return output
def _type_name(value):
"""
:param value:
A value to get the object name of
:return:
A unicode string of the object name
"""
if inspect.isclass(value):
cls = value
else:
cls = value.__class__
if cls.__module__ in set(['builtins', '__builtin__']):
return cls.__name__
return '%s.%s' % (cls.__module__, cls.__name__)
def _writer(func):
"""
Decorator for a custom writer, but a default reader
"""
name = func.__name__
return property(fget=lambda self: getattr(self, '_%s' % name), fset=func)
class OCSPResponseBuilder(object):
_response_status = None
_certificate = None
_certificate_status = None
_revocation_date = None
_certificate_issuer = None
_hash_algo = None
_key_hash_algo = None
_nonce = None
_this_update = None
_next_update = None
_response_data_extensions = None
_single_response_extensions = None
def __init__(self, response_status, certificate_status_list=[], revocation_date=None):
"""
Unless changed, responses will use SHA-256 for the signature,
and will be valid from the moment created for one week.
:param response_status:
A unicode string of OCSP response type:
- "successful" - when the response includes information about the certificate
- "malformed_request" - when the request could not be understood
- "internal_error" - when an internal error occured with the OCSP responder
- "try_later" - when the OCSP responder is temporarily unavailable
- "sign_required" - when the OCSP request must be signed
- "unauthorized" - when the responder is not the correct responder for the certificate
        :param certificate_status_list:
A list of tuples with certificate serial number and certificate status objects.
certificate_status:
A unicode string of the status of the certificate. Only required if
the response_status is "successful".
- "good" - when the certificate is in good standing
- "revoked" - when the certificate is revoked without a reason code
- "key_compromise" - when a private key is compromised
- "ca_compromise" - when the CA issuing the certificate is compromised
- "affiliation_changed" - when the certificate subject name changed
- "superseded" - when the certificate was replaced with a new one
- "cessation_of_operation" - when the certificate is no longer needed
- "certificate_hold" - when the certificate is temporarily invalid
- "remove_from_crl" - only delta CRLs - when temporary hold is removed
- "privilege_withdrawn" - one of the usages for a certificate was removed
- "unknown" - the responder doesn't know about the certificate being requested
:param revocation_date:
A datetime.datetime object of when the certificate was revoked, if
the response_status is "successful" and the certificate status is
not "good" or "unknown".
"""
self._response_status = response_status
self._certificate_status_list = certificate_status_list
self._revocation_date = revocation_date
self._key_hash_algo = 'sha1'
self._hash_algo = 'sha256'
self._response_data_extensions = {}
self._single_response_extensions = {}
@_writer
def nonce(self, value):
"""
The nonce that was provided during the request.
"""
if not isinstance(value, byte_cls):
raise TypeError(_pretty_message(
'''
nonce must be a byte string, not %s
''',
_type_name(value)
))
self._nonce = value
@_writer
def certificate_issuer(self, value):
"""
An asn1crypto.x509.Certificate object of the issuer of the certificate.
This should only be set if the OCSP responder is not the issuer of
the certificate, but instead a special certificate only for OCSP
responses.
"""
if value is not None:
is_oscrypto = isinstance(value, asymmetric.Certificate)
if not is_oscrypto and not isinstance(value, x509.Certificate):
raise TypeError(_pretty_message(
'''
certificate_issuer must be an instance of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''',
_type_name(value)
))
if is_oscrypto:
value = value.asn1
self._certificate_issuer = value
@_writer
def next_update(self, value):
"""
A datetime.datetime object of when the response may next change. This
should only be set if responses are cached. If responses are generated
fresh on every request, this should not be set.
"""
if not isinstance(value, datetime):
raise TypeError(_pretty_message(
'''
next_update must be an instance of datetime.datetime, not %s
''',
_type_name(value)
))
self._next_update = value
def build(self, responder_private_key=None, responder_certificate=None):
"""
Validates the request information, constructs the ASN.1 structure and
signs it.
        The responder_private_key and responder_certificate parameters are only
required if the response_status is "successful".
:param responder_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the response with
:param responder_certificate:
An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate
object of the certificate associated with the private key
:return:
An asn1crypto.ocsp.OCSPResponse object of the response
"""
if self._response_status != 'successful':
return ocsp.OCSPResponse({
'response_status': self._response_status
})
is_oscrypto = isinstance(responder_private_key, asymmetric.PrivateKey)
if not isinstance(responder_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
raise TypeError(_pretty_message(
'''
                responder_private_key must be an instance of
asn1crypto.keys.PrivateKeyInfo or
oscrypto.asymmetric.PrivateKey, not %s
''',
_type_name(responder_private_key)
))
cert_is_oscrypto = isinstance(responder_certificate, asymmetric.Certificate)
if not isinstance(responder_certificate, x509.Certificate) and not cert_is_oscrypto:
raise TypeError(_pretty_message(
'''
responder_certificate must be an instance of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''',
_type_name(responder_certificate)
))
if cert_is_oscrypto:
responder_certificate = responder_certificate.asn1
if self._certificate_status_list is None:
raise ValueError(_pretty_message(
'''
certificate_status_list must be set if the response_status is
"successful"
'''
))
def _make_extension(name, value):
return {
'extn_id': name,
'critical': False,
'extn_value': value
}
responses = []
for serial, status in self._certificate_status_list:
response_data_extensions = []
single_response_extensions = []
for name, value in self._response_data_extensions.items():
response_data_extensions.append(_make_extension(name, value))
if self._nonce:
response_data_extensions.append(
_make_extension('nonce', self._nonce)
)
if not response_data_extensions:
response_data_extensions = None
for name, value in self._single_response_extensions.items():
single_response_extensions.append(_make_extension(name, value))
if self._certificate_issuer:
single_response_extensions.append(
_make_extension(
'certificate_issuer',
[
x509.GeneralName(
name='directory_name',
value=self._certificate_issuer.subject
)
]
)
)
if not single_response_extensions:
single_response_extensions = None
responder_key_hash = getattr(responder_certificate.public_key, self._key_hash_algo)
if status == 'good':
cert_status = ocsp.CertStatus(
name='good',
value=core.Null()
)
elif status == 'unknown':
cert_status = ocsp.CertStatus(
name='unknown',
value=core.Null()
)
else:
reason = status if status != 'revoked' else 'unspecified'
cert_status = ocsp.CertStatus(
name='revoked',
value={
'revocation_time': self._revocation_date,
'revocation_reason': reason,
}
)
issuer = self._certificate_issuer if self._certificate_issuer else responder_certificate
produced_at = datetime.now(timezone.utc).replace(microsecond=0)
if self._this_update is None:
self._this_update = produced_at
if self._next_update is None:
self._next_update = (self._this_update + timedelta(days=7)).replace(microsecond=0)
response = {
'cert_id': {
'hash_algorithm': {
'algorithm': self._key_hash_algo
},
'issuer_name_hash': getattr(issuer.subject, self._key_hash_algo),
'issuer_key_hash': getattr(issuer.public_key, self._key_hash_algo),
'serial_number': serial,
},
'cert_status': cert_status,
'this_update': self._this_update,
'next_update': self._next_update,
'single_extensions': single_response_extensions
}
responses.append(response)
response_data = ocsp.ResponseData({
'responder_id': ocsp.ResponderId(name='by_key', value=responder_key_hash),
'produced_at': produced_at,
'responses': responses,
'response_extensions': response_data_extensions
})
signature_algo = responder_private_key.algorithm
if signature_algo == 'ec':
signature_algo = 'ecdsa'
signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)
if responder_private_key.algorithm == 'rsa':
sign_func = asymmetric.rsa_pkcs1v15_sign
elif responder_private_key.algorithm == 'dsa':
sign_func = asymmetric.dsa_sign
elif responder_private_key.algorithm == 'ec':
sign_func = asymmetric.ecdsa_sign
if not is_oscrypto:
responder_private_key = asymmetric.load_private_key(responder_private_key)
signature_bytes = sign_func(responder_private_key, response_data.dump(), self._hash_algo)
certs = None
if self._certificate_issuer and getattr(self._certificate_issuer.public_key, self._key_hash_algo) != responder_key_hash:
certs = [responder_certificate]
return ocsp.OCSPResponse({
'response_status': self._response_status,
'response_bytes': {
'response_type': 'basic_ocsp_response',
'response': {
'tbs_response_data': response_data,
'signature_algorithm': {'algorithm': signature_algorithm_id},
'signature': signature_bytes,
'certs': certs,
}
}
})
# Enums
class ResponseStatus(enum.Enum):
successful = 'successful'
malformed_request = 'malformed_request'
internal_error = 'internal_error'
try_later = 'try_later'
sign_required = 'sign_required'
unauthorized = 'unauthorized'
class CertificateStatus(enum.Enum):
good = 'good'
revoked = 'revoked'
key_compromise = 'key_compromise'
ca_compromise = 'ca_compromise'
affiliation_changed = 'affiliation_changed'
superseded = 'superseded'
cessation_of_operation = 'cessation_of_operation'
certificate_hold = 'certificate_hold'
remove_from_crl = 'remove_from_crl'
privilege_withdrawn = 'privilege_withdrawn'
unknown = 'unknown'
# API endpoints
FAULT_REVOKED = "revoked"
FAULT_UNKNOWN = "unknown"
app = Flask(__name__)
class OCSPResponder:
def __init__(self, issuer_cert: str, responder_cert: str, responder_key: str,
fault: str, next_update_seconds: int):
"""
Create a new OCSPResponder instance.
:param issuer_cert: Path to the issuer certificate.
:param responder_cert: Path to the certificate of the OCSP responder
with the `OCSP Signing` extension.
:param responder_key: Path to the private key belonging to the
responder cert.
        :param fault: Optional fault to inject into every response: either
            ``FAULT_REVOKED`` or ``FAULT_UNKNOWN``. With ``None``, every
            certificate is reported as good.
        :param next_update_seconds: The ``nextUpdate`` value that will be written
            into the response, in seconds.
"""
# Certs and keys
self._issuer_cert = asymmetric.load_certificate(issuer_cert)
self._responder_cert = asymmetric.load_certificate(responder_cert)
self._responder_key = asymmetric.load_private_key(responder_key)
# Next update
self._next_update_seconds = next_update_seconds
self._fault = fault
def _fail(self, status: ResponseStatus) -> OCSPResponse:
builder = OCSPResponseBuilder(response_status=status.value)
return builder.build()
def parse_ocsp_request(self, request_der: bytes) -> OCSPRequest:
"""
Parse the request bytes, return an ``OCSPRequest`` instance.
"""
return OCSPRequest.load(request_der)
def validate(self):
time = datetime(2018, 1, 1, 1, 00, 00, 00, timezone.utc)
if self._fault == FAULT_REVOKED:
return (CertificateStatus.revoked, time)
elif self._fault == FAULT_UNKNOWN:
return (CertificateStatus.unknown, None)
        elif self._fault is not None:
            raise NotImplementedError('Fault type could not be found')
return (CertificateStatus.good, time)
def _build_ocsp_response(self, ocsp_request: OCSPRequest) -> OCSPResponse:
"""
Create and return an OCSP response from an OCSP request.
"""
# Get the certificate serial
tbs_request = ocsp_request['tbs_request']
request_list = tbs_request['request_list']
if len(request_list) < 1:
logger.warning('Received OCSP request with no requests')
            raise NotImplementedError('Empty requests not supported')
single_request = request_list[0] # TODO: Support more than one request
req_cert = single_request['req_cert']
serial = req_cert['serial_number'].native
# Check certificate status
try:
certificate_status, revocation_date = self.validate()
except Exception as e:
logger.exception('Could not determine certificate status: %s', e)
return self._fail(ResponseStatus.internal_error)
certificate_status_list = [(serial, certificate_status.value)]
# Build the response
builder = OCSPResponseBuilder(**{
'response_status': ResponseStatus.successful.value,
'certificate_status_list': certificate_status_list,
'revocation_date': revocation_date,
})
# Parse extensions
for extension in tbs_request['request_extensions']:
extn_id = extension['extn_id'].native
critical = extension['critical'].native
value = extension['extn_value'].parsed
# This variable tracks whether any unknown extensions were encountered
unknown = False
# Handle nonce extension
if extn_id == 'nonce':
builder.nonce = value.native
# That's all we know
else:
unknown = True
# If an unknown critical extension is encountered (which should not
# usually happen, according to RFC 6960 4.1.2), we should throw our
# hands up in despair and run.
if unknown is True and critical is True:
logger.warning('Could not parse unknown critical extension: %r',
dict(extension.native))
return self._fail(ResponseStatus.internal_error)
# If it's an unknown non-critical extension, we can safely ignore it.
elif unknown is True:
logger.info('Ignored unknown non-critical extension: %r', dict(extension.native))
# Set certificate issuer
builder.certificate_issuer = self._issuer_cert
# Set next update date
now = datetime.now(timezone.utc)
builder.next_update = (now + timedelta(seconds=self._next_update_seconds)).replace(microsecond=0)
return builder.build(self._responder_key, self._responder_cert)
    def build_http_response(self, ocsp_request: OCSPRequest) -> Response:
        global app
        response_der = self._build_ocsp_response(ocsp_request).dump()
resp = app.make_response((response_der, 200))
        resp.headers['Content-Type'] = 'application/ocsp-response'
return resp
responder = None
def init_responder(issuer_cert: str, responder_cert: str, responder_key: str, fault: str, next_update_seconds: int):
global responder
responder = OCSPResponder(issuer_cert=issuer_cert, responder_cert=responder_cert, responder_key=responder_key, fault=fault, next_update_seconds=next_update_seconds)
def init(port=8080, debug=False):
logger.info('Launching %sserver on port %d', 'debug' if debug else '', port)
app.run(port=port, debug=debug)
@app.route('/', methods=['GET'])
def _handle_root():
return 'ocsp-responder'
@app.route('/status/', defaults={'u_path': ''}, methods=['GET'])
@app.route('/status/<path:u_path>', methods=['GET'])
def _handle_get(u_path):
    """
    An OCSP GET request contains the DER-in-base64 encoded OCSP request in the
    HTTP request URL.
    """
    global responder
der = base64.b64decode(u_path)
ocsp_request = responder.parse_ocsp_request(der)
return responder.build_http_response(ocsp_request)
@app.route('/status', methods=['POST'])
def _handle_post():
    """
    An OCSP POST request contains the DER encoded OCSP request in the HTTP
    request body.
    """
    global responder
ocsp_request = responder.parse_ocsp_request(request.data)
return responder.build_http_response(ocsp_request)
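# Example of wiring this module up in a test harness (the file paths below are
# hypothetical; only init_responder()/init() are real entry points of this module):
#
#   init_responder(issuer_cert='ca.pem', responder_cert='ocsp-responder.pem',
#                  responder_key='ocsp-responder.key', fault=FAULT_REVOKED,
#                  next_update_seconds=3600)
#   init(port=8100, debug=False)
#
# With fault=FAULT_REVOKED every certificate is reported as revoked, with
# FAULT_UNKNOWN as unknown, and with fault=None as good.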
|
posthog/migrations/0051_precalculate_cohorts.py | avoajaugochukwu/posthog | 7,409 | 12694480 | <filename>posthog/migrations/0051_precalculate_cohorts.py
# Generated by Django 3.0.5 on 2020-05-07 18:22
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0050_dashboards"),
]
operations = [
migrations.AddField(
model_name="cohort",
name="created_at",
field=models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True),
),
migrations.AddField(
model_name="cohort",
name="created_by",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL,
),
),
migrations.AddField(model_name="cohort", name="is_calculating", field=models.BooleanField(default=False),),
migrations.AddField(
model_name="cohort", name="last_calculation", field=models.DateTimeField(blank=True, null=True),
),
migrations.CreateModel(
name="CohortPeople",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID",),),
("cohort", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="posthog.Cohort"),),
("person", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="posthog.Person"),),
],
),
migrations.AddField(
model_name="cohort",
name="people",
field=models.ManyToManyField(through="posthog.CohortPeople", to="posthog.Person"),
),
migrations.AddIndex(
model_name="cohortpeople",
index=models.Index(fields=["cohort_id", "person_id"], name="posthog_coh_cohort__89c25f_idx"),
),
]
|
tripletloss/tripletlosslayer.py | YoungerGao/tripletloss | 283 | 12694502 | <reponame>YoungerGao/tripletloss
# --------------------------------------------------------
# TRIPLET LOSS
# Copyright (c) 2015 Pinguo Tech.
# Written by <NAME>
# --------------------------------------------------------
"""The data layer used during training a VGG_FACE network by triplet loss.
"""
import caffe
import numpy as np
from numpy import *
import yaml
from multiprocessing import Process, Queue
from caffe._caffe import RawBlobVec
from sklearn import preprocessing
class TripletLayer(caffe.Layer):
global no_residual_list,margin
def setup(self, bottom, top):
"""Setup the TripletDataLayer."""
assert shape(bottom[0].data) == shape(bottom[1].data)
assert shape(bottom[0].data) == shape(bottom[2].data)
layer_params = yaml.load(self.param_str_)
self.margin = layer_params['margin']
self.a = 1
top[0].reshape(1)
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
anchor_minibatch_db = []
positive_minibatch_db = []
negative_minibatch_db = []
for i in range((bottom[0]).num):
anchor_minibatch_db.append(bottom[0].data[i])
positive_minibatch_db.append(bottom[1].data[i])
negative_minibatch_db.append(bottom[2].data[i])
loss = float(0)
self.no_residual_list = []
for i in range(((bottom[0]).num)):
a = np.array(anchor_minibatch_db[i])
p = np.array(positive_minibatch_db[i])
n = np.array(negative_minibatch_db[i])
a_p = a - p
a_n = a - n
ap = np.dot(a_p,a_p)
an = np.dot(a_n,a_n)
dist = (self.margin + ap - an)
_loss = max(dist,0.0)
if i == 0:
print ('loss:'+' ap:'+str(ap)+' '+'an:'+str(an))
if _loss == 0 :
self.no_residual_list.append(i)
loss += _loss
loss = (loss/(2*(bottom[0]).num))
top[0].data[...] = loss
def backward(self, top, propagate_down, bottom):
count = 0
if propagate_down[0]:
for i in range((bottom[0]).num):
if not i in self.no_residual_list:
x_a = bottom[0].data[i]
x_p = bottom[1].data[i]
x_n = bottom[2].data[i]
#print x_a,x_p,x_n
bottom[0].diff[i] = self.a*((x_n - x_p)/((bottom[0]).num))
bottom[1].diff[i] = self.a*((x_p - x_a)/((bottom[0]).num))
bottom[2].diff[i] = self.a*((x_a - x_n)/((bottom[0]).num))
count += 1
else:
bottom[0].diff[i] = np.zeros(shape(bottom[0].data)[1])
bottom[1].diff[i] = np.zeros(shape(bottom[0].data)[1])
bottom[2].diff[i] = np.zeros(shape(bottom[0].data)[1])
#print 'select gradient_loss:',bottom[0].diff[0][0]
#print shape(bottom[0].diff),shape(bottom[1].diff),shape(bottom[2].diff)
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
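# Summary of the loss computed in forward() above (a sketch, matching the code):
#   L = (1 / (2 * N)) * sum_i max(0, margin + ||a_i - p_i||^2 - ||a_i - n_i||^2)
# where a_i, p_i, n_i are the anchor, positive and negative feature vectors taken
# from bottom[0], bottom[1] and bottom[2], and N is the batch size.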
|
sleepypuppy/admin/models.py | hexlism/css_platform | 952 | 12694530 | # Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import Column, Integer, ForeignKey
from sleepypuppy import db
# Database association models
user_associations = db.Table(
'user_associations',
Column('user_id', Integer, ForeignKey('users.id')),
Column('assessment_id', Integer, ForeignKey('assessments.id')),
)
taxonomy = db.Table(
'taxonomy',
Column('puppyscript_id', Integer, ForeignKey('puppyscript.id')),
Column('payload', Integer, ForeignKey('payloads.id')),
)
|
Algo and DSA/LeetCode-Solutions-master/Python/relative-sort-array.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12694535 | <reponame>Sourav692/FAANG-Interview-Preparation<gh_stars>1000+
# Time: O(nlogn)
# Space: O(n)
class Solution(object):
def relativeSortArray(self, arr1, arr2):
"""
:type arr1: List[int]
:type arr2: List[int]
:rtype: List[int]
"""
lookup = {v: i for i, v in enumerate(arr2)}
return sorted(arr1, key=lambda i: lookup.get(i, len(arr2)+i))
|
corehq/apps/cleanup/management/commands/purge_docs.py | dimagilg/commcare-hq | 471 | 12694543 | import sys
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.util.couch import get_db_by_doc_type
class Command(BaseCommand):
help = "Purge ALL documents of a particular type. E.g. purge_docs MyDocType,AnotherOne"
def handle(self, doc_types, *args, **options):
user_input = input('\n'.join([
'\n\nReally delete documents of the following types: {}?',
'This operation is not reversible. Enter a number N to delete the first '
'N found, or type "delete all" to delete everything.',
'',
]).format(doc_types))
if user_input == 'delete all':
remaining = None
else:
try:
remaining = int(user_input)
except ValueError:
print('aborting')
sys.exit()
doc_types = doc_types.split(',')
deleted = 0
# unfortunately the only couch view we have for this needs to go by domain
# will be a bit slow
domain_names = Domain.get_all_names()
for doc_type in doc_types:
db = get_db_by_doc_type(doc_type)
if not db:
print("Cannot find db for {}, skipping".format(doc_type))
continue
for domain in domain_names:
docs = [row['doc'] for row in db.view(
'by_domain_doc_type_date/view',
startkey=[domain, doc_type],
endkey=[domain, doc_type, {}],
reduce=False,
include_docs=True,
)][:remaining]
if docs:
count = len(docs)
print('deleting {} {}s from {}'.format(count, doc_type, domain))
db.delete_docs(docs)
deleted += count
if remaining is not None:
remaining -= count
if remaining <= 0:
return
print('successfully deleted {} documents'.format(deleted))
|
scripts/autosample.py | bcnoexceptions/mtgencode | 159 | 12694565 | <gh_stars>100-1000
#!/usr/bin/env python
import sys
import os
import subprocess
import random
def extract_cp_name(name):
# "lm_lstm_epoch50.00_0.1870.t7"
if not (name[:13] == 'lm_lstm_epoch' and name[-3:] == '.t7'):
return None
name = name[13:-3]
(epoch, vloss) = tuple(name.split('_'))
return (float(epoch), float(vloss))
def sample(cp, temp, count, seed = None, ident = 'output'):
if seed is None:
seed = random.randint(-1000000000, 1000000000)
outfile = cp + '.' + ident + '.' + str(temp) + '.txt'
cmd = ('th sample.lua ' + cp
+ ' -temperature ' + str(temp)
+ ' -length ' + str(count)
+ ' -seed ' + str(seed)
+ ' >> ' + outfile)
if os.path.exists(outfile):
print(outfile + ' already exists, skipping')
return False
else:
# UNSAFE SHELL=TRUE FOR CONVENIENCE
subprocess.call('echo "' + cmd + '" | tee ' + outfile, shell=True)
subprocess.call(cmd, shell=True)
def find_best_cp(cpdir):
best = None
best_cp = None
for path in os.listdir(cpdir):
fullpath = os.path.join(cpdir, path)
if os.path.isfile(fullpath):
extracted = extract_cp_name(path)
if not extracted is None:
(epoch, vloss) = extracted
if best is None or vloss < best:
best = vloss
best_cp = fullpath
return best_cp
def process_dir(cpdir, temp, count, seed = None, ident = 'output', verbose = False):
if verbose:
print('processing ' + cpdir)
best_cp = find_best_cp(cpdir)
if not best_cp is None:
sample(best_cp, temp, count, seed=seed, ident=ident)
for path in os.listdir(cpdir):
fullpath = os.path.join(cpdir, path)
if os.path.isdir(fullpath):
process_dir(fullpath, temp, count, seed=seed, ident=ident, verbose=verbose)
def main(rnndir, cpdir, temp, count, seed = None, ident = 'output', verbose = False):
if not os.path.isdir(rnndir):
raise ValueError('bad rnndir: ' + rnndir)
if not os.path.isdir(cpdir):
raise ValueError('bad cpdir: ' + cpdir)
os.chdir(rnndir)
process_dir(cpdir, temp, count, seed=seed, ident=ident, verbose=verbose)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('rnndir', #nargs='?', default=None,
help='base rnn directory, must contain sample.lua')
parser.add_argument('cpdir', #nargs='?', default=None,
help='checkpoint directory, all subdirectories will be processed')
parser.add_argument('-t', '--temperature', action='store', default='1.0',
help='sampling temperature')
parser.add_argument('-c', '--count', action='store', default='1000000',
help='number of characters to sample each time')
parser.add_argument('-s', '--seed', action='store', default=None,
help='fixed seed; if not present, a random seed will be used')
parser.add_argument('-i', '--ident', action='store', default='output',
help='identifier to include in the output filenames')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose output')
args = parser.parse_args()
if args.seed is None:
seed = None
else:
seed = int(args.seed)
main(args.rnndir, args.cpdir, float(args.temperature), int(args.count),
seed=seed, ident=args.ident, verbose = args.verbose)
exit(0)
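# Example invocation (the paths here are hypothetical):
#   ./autosample.py ~/char-rnn ~/char-rnn/cv -t 0.9 -c 500000 -i mtg -v
# This walks every subdirectory of the checkpoint directory, picks the checkpoint
# with the lowest validation loss in each, and samples 500000 characters from it
# at temperature 0.9 into a <checkpoint>.mtg.0.9.txt file next to the checkpoint.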
|
tensorflow_addons/rnn/peephole_lstm_cell.py | leondgarse/addons | 1,560 | 12694568 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements PeepholeLSTM Cell."""
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="Addons")
class PeepholeLSTMCell(tf.keras.layers.LSTMCell):
"""Equivalent to `tf.keras.layers.LSTMCell` class but adds peephole connections.
Peephole connections allow the gates to utilize the previous internal state as
well as the previous hidden state (which is what LSTMCell is limited to).
This allows PeepholeLSTMCell to better learn precise timings over LSTMCell.
From [Gers et al., 2002](
http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf):
"We find that LSTM augmented by 'peephole connections' from its internal
cells to its multiplicative gates can learn the fine distinction between
sequences of spikes spaced either 50 or 49 time steps apart without the help
of any short training exemplars."
The peephole implementation is based on:
[Sak et al., 2014](https://research.google.com/pubs/archive/43905.pdf)
Example:
>>> inputs = np.random.random([30,23,9]).astype(np.float32)
>>> LSTMCell = tfa.rnn.PeepholeLSTMCell(4)
>>> rnn = tf.keras.layers.RNN(LSTMCell, return_sequences=True, return_state=True)
>>> outputs, memory_state, carry_state = rnn(inputs)
>>> outputs.shape
TensorShape([30, 23, 4])
>>> memory_state.shape
TensorShape([30, 4])
>>> carry_state.shape
TensorShape([30, 4])
"""
def build(self, input_shape):
super().build(input_shape)
# The following are the weight matrices for the peephole connections. These
# are multiplied with the previous internal state during the computation of
# carry and output.
self.input_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name="input_gate_peephole_weights",
initializer=self.kernel_initializer,
)
self.forget_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name="forget_gate_peephole_weights",
initializer=self.kernel_initializer,
)
self.output_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name="output_gate_peephole_weights",
initializer=self.kernel_initializer,
)
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i
+ tf.keras.backend.dot(h_tm1_i, self.recurrent_kernel[:, : self.units])
+ self.input_gate_peephole_weights * c_tm1
)
f = self.recurrent_activation(
x_f
+ tf.keras.backend.dot(
h_tm1_f, self.recurrent_kernel[:, self.units : self.units * 2]
)
+ self.forget_gate_peephole_weights * c_tm1
)
c = f * c_tm1 + i * self.activation(
x_c
+ tf.keras.backend.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2 : self.units * 3]
)
)
o = self.recurrent_activation(
x_o
+ tf.keras.backend.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3 :])
+ self.output_gate_peephole_weights * c
)
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0 + self.input_gate_peephole_weights * c_tm1)
f = self.recurrent_activation(z1 + self.forget_gate_peephole_weights * c_tm1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3 + self.output_gate_peephole_weights * c)
return c, o
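# For reference, a sketch of the fused peephole update implemented above:
#   i_t = sigma(z0 + w_ci * c_{t-1})      f_t = sigma(z1 + w_cf * c_{t-1})
#   c_t = f_t * c_{t-1} + i_t * act(z2)   o_t = sigma(z3 + w_co * c_t)
# where w_ci, w_cf and w_co are the per-unit peephole weight vectors created in
# build(), and z0..z3 are the usual LSTM pre-activations.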
|
Source/prj/main/ProjectConfigChanger.py | fairhopeweb/Projeny | 752 | 12694574 |
import mtm.util.YamlSerializer as YamlSerializer
from mtm.util.Assert import *
import mtm.ioc.Container as Container
from mtm.ioc.Inject import Inject
from mtm.ioc.Inject import InjectMany
import mtm.ioc.IocAssertions as Assertions
from mtm.util.Platforms import Platforms
from prj.main.ProjenyConstants import ProjectConfigFileName
from prj.main.ProjectConfig import ProjectConfig
class ProjectConfigChanger:
_log = Inject('Logger')
_sys = Inject('SystemHelper')
_packageManager = Inject('PackageManager')
_varMgr = Inject('VarManager')
def _getProjectConfigPath(self, projectName):
return self._varMgr.expandPath('[UnityProjectsDir]/{0}/{1}'.format(projectName, ProjectConfigFileName))
def _loadProjectConfig(self, projectName):
configPath = self._getProjectConfigPath(projectName)
yamlData = YamlSerializer.deserialize(self._sys.readFileAsText(configPath))
result = ProjectConfig()
for pair in yamlData.__dict__.items():
result.__dict__[pair[0]] = pair[1]
return result
def _saveProjectConfig(self, projectName, projectConfig):
configPath = self._getProjectConfigPath(projectName)
self._sys.writeFileAsText(configPath, YamlSerializer.serialize(projectConfig))
def addPackage(self, projectName, packageName, addToAssetsFolder):
with self._log.heading('Adding package {0} to project {1}'.format(packageName, projectName)):
assertThat(packageName in self._packageManager.getAllPackageNames(), "Could not find the given package '{0}' in the UnityPackages folder", packageName)
self._packageManager.setPathsForProjectPlatform(projectName, Platforms.Windows)
projConfig = self._loadProjectConfig(projectName)
assertThat(packageName not in projConfig.assetsFolder and packageName not in projConfig.pluginsFolder,
"Given package '{0}' has already been added to project config", packageName)
if addToAssetsFolder:
projConfig.assetsFolder.append(packageName)
else:
projConfig.pluginsFolder.append(packageName)
self._saveProjectConfig(projectName, projConfig)
self._log.good("Added package '{0}' to file '{1}/{2}'", packageName, projectName, ProjectConfigFileName)
|
pybotters/models/experimental/__init__.py | maruuuui/pybotters | 176 | 12694581 | from typing import Tuple
from .bybit import BybitInverseDataStore, BybitUSDTDataStore
__all__: Tuple[str, ...] = (
'BybitInverseDataStore',
'BybitUSDTDataStore',
)
|
tests/test_issue_11.py | alekseyl1992/pyrobuf | 578 | 12694607 | def test_before_overflow():
from issue_11_proto import A
a = A()
a.a0 = 0x7FFFFFFF
assert A.FromString(a.SerializeToString()).a0 == 2147483647
def test_after_overflow():
from issue_11_proto import A
a = A()
a.a0 = 0x80000000
assert A.FromString(a.SerializeToString()).a0 == 2147483648
|
platypush/plugins/zigbee/mqtt/__init__.py | BlackLight/platypush | 228 | 12694622 | import json
import threading
from queue import Queue
from typing import Optional, List, Any, Dict, Union
from platypush.message.response import Response
from platypush.plugins.mqtt import MqttPlugin, action
from platypush.plugins.switch import SwitchPlugin
class ZigbeeMqttPlugin(MqttPlugin, SwitchPlugin): # lgtm [py/missing-call-to-init]
"""
This plugin allows you to interact with Zigbee devices over MQTT through any Zigbee sniffer and
`zigbee2mqtt <https://www.zigbee2mqtt.io/>`_.
In order to get started you'll need:
- A Zigbee USB adapter/sniffer (in this example I'll use the
`CC2531 <https://hackaday.io/project/163487-zigbee-cc2531-smart-home-usb-adapter>`_.
- A Zigbee debugger/emulator + downloader cable (only to flash the firmware).
Instructions:
- Install `cc-tool <https://github.com/dashesy/cc-tool>`_ either from sources or from a package manager.
- Connect the Zigbee to your PC/RaspberryPi in this way: ::
USB -> CC debugger -> downloader cable -> CC2531 -> USB
- The debugger and the adapter should be connected *at the same time*. If the later ``cc-tool`` command throws
up an error, put the device in sync while connected by pressing the _Reset_ button on the debugger.
- Check where the device is mapped. On Linux it will usually be ``/dev/ttyACM0``.
- Download the latest `Z-Stack firmware <https://github.com/Koenkk/Z-Stack-firmware/tree/master/coordinator>`_
to your device. Instructions for a CC2531 device:
.. code-block:: shell
wget https://github.com/Koenkk/Z-Stack-firmware/raw/master/coordinator/Z-Stack_Home_1.2/bin/default/CC2531_DEFAULT_20201127.zip
unzip CC2531_DEFAULT_20201127.zip
[sudo] cc-tool -e -w CC2531ZNP-Prod.hex
- You can disconnect your debugger and downloader cable once the firmware is flashed.
- Install ``zigbee2mqtt``. First install a node/npm environment, then either install ``zigbee2mqtt`` manually or
through your package manager. **NOTE**: many API breaking changes have occurred on Zigbee2MQTT 1.17.0,
therefore this integration will only be compatible with the version 1.17.0 of the service or higher versions.
Manual instructions:
.. code-block:: shell
# Clone zigbee2mqtt repository
[sudo] git clone https://github.com/Koenkk/zigbee2mqtt.git /opt/zigbee2mqtt
[sudo] chown -R pi:pi /opt/zigbee2mqtt # Or whichever is your user
# Install dependencies (as user "pi")
cd /opt/zigbee2mqtt
npm install
- You need to have an MQTT broker running somewhere. If not, you can install
`Mosquitto <https://mosquitto.org/>`_ through your package manager on any device in your network.
- Edit the ``/opt/zigbee2mqtt/data/configuration.yaml`` file to match the configuration of your MQTT broker:
.. code-block:: yaml
# MQTT settings
mqtt:
# MQTT base topic for zigbee2mqtt MQTT messages
base_topic: zigbee2mqtt
# MQTT server URL
server: 'mqtt://localhost'
# MQTT server authentication, uncomment if required:
# user: my_user
# password: <PASSWORD>
- Also make sure that ``permit_join`` is set to ``True``, in order to allow Zigbee devices to join the network
while you're configuring it. It's equally important to set ``permit_join`` to ``False`` once you have
configured your network, to prevent accidental/malignant joins from outer Zigbee devices.
- Start the ``zigbee2mqtt`` daemon on your device (the
`official documentation <https://www.zigbee2mqtt.io/getting_started/running_zigbee2mqtt.html#5-optional-running-as-a-daemon-with-systemctl>`_
      also contains instructions on how to configure it as a ``systemd`` service):
.. code-block:: shell
cd /opt/zigbee2mqtt
npm start
- If you have Zigbee devices that are paired to other bridges, unlink them or do a factory reset to pair them
to your new bridge.
- If it all goes fine, once the daemon is running and a new device is found you should see traces like this in
the output of ``zigbee2mqtt``::
zigbee2mqtt:info 2019-11-09T12:19:56: Successfully interviewed '0x00158d0001dc126a', device has
successfully been paired
- You are now ready to use this integration.
Requires:
* **paho-mqtt** (``pip install paho-mqtt``)
"""
def __init__(self, host: str = 'localhost', port: int = 1883, base_topic: str = 'zigbee2mqtt', timeout: int = 10,
tls_certfile: Optional[str] = None, tls_keyfile: Optional[str] = None,
tls_version: Optional[str] = None, tls_ciphers: Optional[str] = None,
username: Optional[str] = None, password: Optional[str] = None, **kwargs):
"""
:param host: Default MQTT broker where ``zigbee2mqtt`` publishes its messages (default: ``localhost``).
:param port: Broker listen port (default: 1883).
:param base_topic: Topic prefix, as specified in ``/opt/zigbee2mqtt/data/configuration.yaml``
(default: '``base_topic``').
:param timeout: If the command expects from a response, then this timeout value will be used
(default: 60 seconds).
:param tls_cafile: If the connection requires TLS/SSL, specify the certificate authority file
(default: None)
:param tls_certfile: If the connection requires TLS/SSL, specify the certificate file (default: None)
:param tls_keyfile: If the connection requires TLS/SSL, specify the key file (default: None)
:param tls_version: If the connection requires TLS/SSL, specify the minimum TLS supported version
(default: None)
:param tls_ciphers: If the connection requires TLS/SSL, specify the supported ciphers (default: None)
:param username: If the connection requires user authentication, specify the username (default: None)
:param password: If the connection requires user authentication, specify the password (default: None)
"""
super().__init__(host=host, port=port, tls_certfile=tls_certfile, tls_keyfile=tls_keyfile,
tls_version=tls_version, tls_ciphers=tls_ciphers, username=username,
password=password, **kwargs)
self.base_topic = base_topic
self.timeout = timeout
self._info = {
'devices': {},
'groups': {},
}
def _get_network_info(self, **kwargs):
self.logger.info('Fetching Zigbee network information')
client = None
mqtt_args = self._mqtt_args(**kwargs)
timeout = 30
if 'timeout' in mqtt_args:
timeout = mqtt_args.pop('timeout')
info = {
'state': None,
'info': {},
'config': {},
'devices': [],
'groups': [],
}
info_ready_events = {topic: threading.Event() for topic in info.keys()}
def _on_message():
def callback(_, __, msg):
topic = msg.topic.split('/')[-1]
if topic in info:
info[topic] = msg.payload.decode() if topic == 'state' else json.loads(msg.payload.decode())
info_ready_events[topic].set()
return callback
try:
host = mqtt_args.pop('host')
port = mqtt_args.pop('port')
client = self._get_client(**mqtt_args)
client.on_message = _on_message()
client.connect(host, port, keepalive=timeout)
client.subscribe(self.base_topic + '/bridge/#')
client.loop_start()
for event in info_ready_events.values():
info_ready = event.wait(timeout=timeout)
if not info_ready:
raise TimeoutError('A timeout occurred while fetching the Zigbee network information')
# Cache the new results
self._info['devices'] = {
device.get('friendly_name', device['ieee_address']): device
for device in info.get('devices', [])
}
self._info['groups'] = {
group.get('name'): group
for group in info.get('groups', [])
}
self.logger.info('Zigbee network configuration updated')
return info
finally:
try:
client.loop_stop()
client.disconnect()
except Exception as e:
self.logger.warning('Error on MQTT client disconnection: {}'.format(str(e)))
def _topic(self, topic):
return self.base_topic + '/' + topic
@staticmethod
def _parse_response(response: Union[dict, Response]) -> dict:
if isinstance(response, Response):
response = response.output
assert response.get('status') != 'error', response.get('error', 'zigbee2mqtt error')
return response
@action
def devices(self, **kwargs) -> List[Dict[str, Any]]:
"""
Get the list of devices registered to the service.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
:return: List of paired devices. Example output:
.. code-block:: json
[
{
"date_code": "20190608",
"friendly_name": "Coordinator",
"ieee_address": "0x00123456789abcde",
"network_address": 0,
"supported": false,
"type": "Coordinator",
"interviewing": false,
"interviewing_completed": true,
"definition": null,
"endpoints": {
"13": {
"bindings": [],
"clusters": {
"input": ["genOta"],
"output": []
},
"output": []
}
}
},
{
"date_code": "20180906",
"friendly_name": "<NAME>",
"ieee_address": "0x00123456789abcdf",
"network_address": 52715,
"power_source": "Mains (single phase)",
"software_build_id": "5.127.1.26581",
"model_id": "LCT001",
"supported": true,
"interviewing": false,
"interviewing_completed": true,
"type": "Router",
"definition": {
"description": "Hue white and color ambiance E26/E27/E14",
"model": "9290012573A",
"vendor": "Philips",
"exposes": [
{
"features": [
{
"access": 7,
"description": "On/off state of this light",
"name": "state",
"property": "state",
"type": "binary",
"value_off": "OFF",
"value_on": "ON",
"value_toggle": "TOGGLE"
},
{
"access": 7,
"description": "Brightness of this light",
"name": "brightness",
"property": "brightness",
"type": "numeric",
"value_max": 254,
"value_min": 0
},
{
"access": 7,
"description": "Color temperature of this light",
"name": "color_temp",
"property": "color_temp",
"type": "numeric",
"unit": "mired",
"value_max": 500,
"value_min": 150
},
{
"description": "Color of this light in the CIE 1931 color space (x/y)",
"features": [
{
"access": 7,
"name": "x",
"property": "x",
"type": "numeric"
},
{
"access": 7,
"name": "y",
"property": "y",
"type": "numeric"
}
],
"name": "color_xy",
"property": "color",
"type": "composite"
}
],
"type": "light"
},
{
"access": 2,
"description": "Triggers an effect on the light (e.g. make light blink for a few seconds)",
"name": "effect",
"property": "effect",
"type": "enum",
"values": [
"blink",
"breathe",
"okay",
"channel_change",
"finish_effect",
"stop_effect"
]
},
{
"access": 1,
"description": "Link quality (signal strength)",
"name": "linkquality",
"property": "linkquality",
"type": "numeric",
"unit": "lqi",
"value_max": 255,
"value_min": 0
}
]
},
"endpoints": {
"11": {
"bindings": [],
"clusters": {
"input": [
"genBasic",
"genIdentify",
"genGroups",
"genScenes",
"genOnOff",
"genLevelCtrl",
"touchlink",
"lightingColorCtrl",
"manuSpecificUbisysDimmerSetup"
],
"output": [
"genOta"
]
},
"configured_reportings": []
},
"242": {
"bindings": [],
"clusters": {
"input": [
"greenPower"
],
"output": [
"greenPower"
]
},
"configured_reportings": []
}
}
}
]
"""
return self._get_network_info(**kwargs).get('devices')
@action
def permit_join(self, permit: bool = True, timeout: Optional[float] = None, **kwargs):
"""
Enable/disable devices from joining the network. This is not persistent (will not be saved to
``configuration.yaml``).
:param permit: Set to True to allow joins, False otherwise.
:param timeout: Allow/disallow joins only for this amount of time.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
if timeout:
return self._parse_response(
self.publish(topic=self._topic('bridge/request/permit_join'),
msg={'value': permit, 'time': timeout},
reply_topic=self._topic('bridge/response/permit_join'),
**self._mqtt_args(**kwargs)))
return self.publish(topic=self._topic('bridge/request/permit_join'),
msg={'value': permit},
**self._mqtt_args(**kwargs))
@action
def factory_reset(self, **kwargs):
"""
Perform a factory reset of a device connected to the network, following the procedure required by the particular
device (for instance, Hue bulbs require the Zigbee adapter to be close to the device while a button on the back
of the bulb is pressed).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
self.publish(topic=self._topic('bridge/request/touchlink/factory_reset'), msg='', **self._mqtt_args(**kwargs))
@action
def log_level(self, level: str, **kwargs):
"""
Change the log level at runtime. This change will not be persistent.
:param level: Possible values: 'debug', 'info', 'warn', 'error'.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/config/log_level'), msg={'value': level},
reply_topic=self._topic('bridge/response/config/log_level'),
**self._mqtt_args(**kwargs)))
@action
def device_set_option(self, device: str, option: str, value: Any, **kwargs):
"""
Change the options of a device. Options can only be changed, not added or deleted.
:param device: Display name of the device.
:param option: Option name.
:param value: New value.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/device/options'),
reply_topic=self._topic('bridge/response/device/options'),
msg={
'id': device,
'options': {
option: value,
}
}, **self._mqtt_args(**kwargs)))
@action
def device_remove(self, device: str, force: bool = False, **kwargs):
"""
Remove a device from the network.
:param device: Display name of the device.
        :param force: Force the removal even if it wasn't acknowledged by the device. Note: a forced remove
only removes the entry from the internal database, but the device is likely to connect again when
restarted unless it's factory reset (default: False).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/device/remove'),
msg={'id': device, 'force': force},
reply_topic=self._topic('bridge/response/device/remove'),
**self._mqtt_args(**kwargs)))
@action
def device_ban(self, device: str, **kwargs):
"""
Ban a device from the network.
:param device: Display name of the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/device/ban'),
reply_topic=self._topic('bridge/response/device/ban'),
msg={'id': device},
**self._mqtt_args(**kwargs)))
@action
def device_whitelist(self, device: str, **kwargs):
"""
        Whitelist a device on the network. Note: once at least one device is whitelisted, all the other non-whitelisted
devices will be removed from the network.
:param device: Display name of the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/device/whitelist'),
reply_topic=self._topic('bridge/response/device/whitelist'),
msg={'id': device},
**self._mqtt_args(**kwargs)))
@action
def device_rename(self, name: str, device: Optional[str] = None, **kwargs):
"""
Rename a device on the network.
:param name: New name.
:param device: Current name of the device to rename. If no name is specified then the rename will
affect the last device that joined the network.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
if name == device:
self.logger.info('Old and new name are the same: nothing to do')
return
# noinspection PyUnresolvedReferences
devices = self.devices().output
assert not [dev for dev in devices if dev.get('friendly_name') == name], \
'A device named {} already exists on the network'.format(name)
if device:
req = {
'from': device,
'to': name,
}
else:
req = {
'last': True,
'to': name,
}
return self._parse_response(
self.publish(topic=self._topic('bridge/request/device/rename'),
msg=req,
reply_topic=self._topic('bridge/response/device/rename'),
**self._mqtt_args(**kwargs)))
@staticmethod
def build_device_get_request(values: List[Dict[str, Any]]) -> dict:
def extract_value(value: dict, root: dict):
if not value.get('access', 1) & 0x1:
# Property not readable
return
if 'features' not in value:
if 'property' in value:
root[value['property']] = 0 if value['type'] == 'numeric' else ''
return
if 'property' in value:
root[value['property']] = root.get(value['property'], {})
root = root[value['property']]
for feature in value['features']:
extract_value(feature, root)
ret = {}
for value in values:
extract_value(value, root=ret)
return ret
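    # Illustrative sketch (not part of the original plugin): given a simplified
    # "exposes" definition such as
    #   [{"type": "light", "features": [
    #       {"access": 7, "property": "state", "type": "binary"},
    #       {"access": 7, "property": "brightness", "type": "numeric"}]}]
    # build_device_get_request produces a payload like
    #   {"state": "", "brightness": 0}
    # which, published to <friendly_name>/get, queries all readable properties at once.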
# noinspection PyShadowingBuiltins
@action
def device_get(self, device: str, property: Optional[str] = None, **kwargs) -> Dict[str, Any]:
"""
Get the properties of a device. The returned keys vary depending on the device. For example, a light bulb
may have the "``state``" and "``brightness``" properties, while an environment sensor may have the
"``temperature``" and "``humidity``" properties, and so on.
:param device: Display name of the device.
:param property: Name of the property that should be retrieved (default: all).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
:return: Key->value map of the device properties.
"""
kwargs = self._mqtt_args(**kwargs)
if property:
properties = self.publish(topic=self._topic(device) + '/get/' + property, reply_topic=self._topic(device),
msg={property: ''}, **kwargs).output
assert property in properties, 'No such property: ' + property
return {property: properties[property]}
if device not in self._info.get('devices', {}):
# Refresh devices info
self._get_network_info(**kwargs)
assert self._info.get('devices', {}).get(device), 'No such device: ' + device
exposes = (self._info.get('devices', {}).get(device, {}).get('definition', {}) or {}).get('exposes', [])
if not exposes:
return {}
return self.publish(topic=self._topic(device) + '/get', reply_topic=self._topic(device),
msg=self.build_device_get_request(exposes), **kwargs)
@action
def devices_get(self, devices: Optional[List[str]] = None, **kwargs) -> Dict[str, dict]:
"""
Get the properties of the devices connected to the network.
:param devices: If set, then only the status of these devices (by friendly name) will be retrieved (default:
retrieve all).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
:return: Key->value map of the device properties:
.. code-block:: json
{
"Bulb": {
"state": "ON",
"brightness": 254
},
"Sensor": {
"temperature": 22.5
}
}
"""
kwargs = self._mqtt_args(**kwargs)
if not devices:
# noinspection PyUnresolvedReferences
devices = set([
device['friendly_name'] or device['ieee_address']
for device in self.devices(**kwargs).output
])
def worker(device: str, q: Queue):
# noinspection PyUnresolvedReferences
q.put(self.device_get(device, **kwargs).output)
queues = {}
workers = {}
response = {}
for device in devices:
queues[device] = Queue()
workers[device] = threading.Thread(target=worker, args=(device, queues[device]))
workers[device].start()
for device in devices:
try:
response[device] = queues[device].get(timeout=kwargs.get('timeout'))
workers[device].join(timeout=kwargs.get('timeout'))
except Exception as e:
                self.logger.warning('An error occurred while getting the status of the device {}: {}'.format(
device, str(e)))
return response
@action
def status(self, device: Optional[str] = None, *args, **kwargs):
"""
Get the status of a device (by friendly name) or of all the connected devices (it wraps :meth:`.devices_get`).
:param device: Device friendly name (default: get all devices).
"""
        return self.devices_get([device] if device else None, *args, **kwargs)
# noinspection PyShadowingBuiltins,DuplicatedCode
@action
def device_set(self, device: str, property: str, value: Any, **kwargs):
"""
        Set a property on a device. The available properties vary depending on the device. For example, a light bulb
may have the "``state``" and "``brightness``" properties, while an environment sensor may have the
"``temperature``" and "``humidity``" properties, and so on.
:param device: Display name of the device.
:param property: Name of the property that should be set.
:param value: New value of the property.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
properties = self.publish(topic=self._topic(device + '/set'),
reply_topic=self._topic(device),
msg={property: value}, **self._mqtt_args(**kwargs)).output
if property:
assert property in properties, 'No such property: ' + property
return {property: properties[property]}
return properties
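    # Illustrative usage sketch (not part of the original plugin). Assuming a running
    # Platypush instance and a device named "My Bulb" (hypothetical name), the get/set
    # actions can be invoked from Python code or event hooks, for example:
    #
    #   from platypush.context import get_plugin
    #   plugin = get_plugin('zigbee.mqtt')
    #   plugin.device_set('My Bulb', 'state', 'ON')
    #   plugin.device_get('My Bulb', 'brightness')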
@action
def device_check_ota_updates(self, device: str, **kwargs) -> dict:
"""
Check if the specified device has any OTA updates available to install.
:param device: Address or friendly name of the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
:return:
.. code-block:: json
{
"id": "<device ID>",
"update_available": true,
"status": "ok"
}
"""
ret = self._parse_response(
self.publish(topic=self._topic('bridge/request/device/ota_update/check'),
reply_topic=self._topic('bridge/response/device/ota_update/check'),
msg={'id': device}, **self._mqtt_args(**kwargs)))
return {
'status': ret['status'],
'id': ret.get('data', {}).get('id'),
'update_available': ret.get('data', {}).get('update_available', False),
}
@action
def device_install_ota_updates(self, device: str, **kwargs):
"""
Install OTA updates for a device if available.
:param device: Address or friendly name of the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/device/ota_update/update'),
reply_topic=self._topic('bridge/response/device/ota_update/update'),
msg={'id': device}, **self._mqtt_args(**kwargs)))
@action
def groups(self, **kwargs) -> List[dict]:
"""
        Get the groups registered on the network.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._get_network_info(**kwargs).get('groups')
@action
def info(self, **kwargs) -> dict:
"""
Get the information, configuration and state of the network.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
:return: Example:
.. code-block:: json
{
"state": "online",
"commit": "07cdc9d",
"config": {
"advanced": {
"adapter_concurrent": null,
"adapter_delay": null,
"availability_blacklist": [],
"availability_blocklist": [],
"availability_passlist": [],
"availability_timeout": 0,
"availability_whitelist": [],
"cache_state": true,
"cache_state_persistent": true,
"cache_state_send_on_startup": true,
"channel": 11,
"elapsed": false,
"ext_pan_id": [
221,
221,
221,
221,
221,
221,
221,
221
],
"homeassistant_discovery_topic": "homeassistant",
"homeassistant_legacy_triggers": true,
"homeassistant_status_topic": "hass/status",
"last_seen": "disable",
"legacy_api": true,
"log_directory": "/opt/zigbee2mqtt/data/log/%TIMESTAMP%",
"log_file": "log.txt",
"log_level": "debug",
"log_output": [
"console",
"file"
],
"log_rotation": true,
"log_syslog": {},
"pan_id": 6754,
"report": false,
"soft_reset_timeout": 0,
"timestamp_format": "YYYY-MM-DD HH:mm:ss"
},
"ban": [],
"blocklist": [],
"device_options": {},
"devices": {
"0x00123456789abcdf": {
"friendly_name": "My Lightbulb"
}
},
"experimental": {
"output": "json"
},
"external_converters": [],
"groups": {},
"homeassistant": false,
"map_options": {
"graphviz": {
"colors": {
"fill": {
"coordinator": "#e04e5d",
"enddevice": "#fff8ce",
"router": "#4ea3e0"
},
"font": {
"coordinator": "#ffffff",
"enddevice": "#000000",
"router": "#ffffff"
},
"line": {
"active": "#009900",
"inactive": "#994444"
}
}
}
},
"mqtt": {
"base_topic": "zigbee2mqtt",
"force_disable_retain": false,
"include_device_information": false,
"server": "mqtt://localhost"
},
"passlist": [],
"permit_join": true,
"serial": {
"disable_led": false,
"port": "/dev/ttyUSB0"
},
"whitelist": []
},
"coordinator": {
"meta": {
"maintrel": 3,
"majorrel": 2,
"minorrel": 6,
"product": 0,
"revision": 20190608,
"transportrev": 2
},
"type": "zStack12"
},
"log_level": "debug",
"network": {
"channel": 11,
"extended_pan_id": "0xdddddddddddddddd",
"pan_id": 6754
},
"permit_join": true,
"version": "1.17.0"
}
"""
info = self._get_network_info(**kwargs)
return {
'state': info.get('state'),
'info': info.get('info'),
}
@action
def group_add(self, name: str, id: Optional[int] = None, **kwargs):
"""
Add a new group.
:param name: Display name of the group.
:param id: Optional numeric ID (default: auto-generated).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
payload = name if id is None else {
'id': id,
'friendly_name': name,
}
return self._parse_response(
self.publish(topic=self._topic('bridge/request/group/add'),
reply_topic=self._topic('bridge/response/group/add'),
msg=payload,
**self._mqtt_args(**kwargs))
)
@action
def group_get(self, group: str, property: Optional[str] = None, **kwargs) -> dict:
"""
        Get one or more properties of a group. The available properties vary depending on the devices in the group.
For example, a light bulb may have the "``state``" (with values ``"ON"`` and ``"OFF"``) and "``brightness``"
properties, while an environment sensor may have the "``temperature``" and "``humidity``" properties, and so on.
:param group: Display name of the group.
:param property: Name of the property to retrieve (default: all available properties)
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
msg = {}
if property:
msg = {property: ''}
properties = self.publish(topic=self._topic(group + '/get'),
reply_topic=self._topic(group),
msg=msg, **self._mqtt_args(**kwargs)).output
if property:
assert property in properties, 'No such property: ' + property
return {property: properties[property]}
return properties
# noinspection PyShadowingBuiltins,DuplicatedCode
@action
def group_set(self, group: str, property: str, value: Any, **kwargs):
"""
        Set a property on a group. The available properties vary depending on the devices in the group.
For example, a light bulb may have the "``state``" (with values ``"ON"`` and ``"OFF"``) and "``brightness``"
properties, while an environment sensor may have the "``temperature``" and "``humidity``" properties, and so on.
:param group: Display name of the group.
:param property: Name of the property that should be set.
:param value: New value of the property.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
properties = self.publish(topic=self._topic(group + '/set'),
reply_topic=self._topic(group),
msg={property: value}, **self._mqtt_args(**kwargs)).output
if property:
assert property in properties, 'No such property: ' + property
return {property: properties[property]}
return properties
@action
def group_rename(self, name: str, group: str, **kwargs):
"""
Rename a group.
:param name: New name.
:param group: Current name of the group to rename.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
if name == group:
self.logger.info('Old and new name are the same: nothing to do')
return
# noinspection PyUnresolvedReferences
groups = {group.get('friendly_name'): group for group in self.groups().output}
assert name not in groups, 'A group named {} already exists on the network'.format(name)
return self._parse_response(
self.publish(topic=self._topic('bridge/request/group/rename'),
reply_topic=self._topic('bridge/response/group/rename'),
msg={'from': group, 'to': name} if group else name,
**self._mqtt_args(**kwargs)))
@action
def group_remove(self, name: str, **kwargs):
"""
Remove a group.
:param name: Display name of the group.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/group/remove'),
reply_topic=self._topic('bridge/response/group/remove'),
msg=name,
**self._mqtt_args(**kwargs)))
@action
def group_add_device(self, group: str, device: str, **kwargs):
"""
Add a device to a group.
:param group: Display name of the group.
:param device: Display name of the device to be added.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/group/members/add'),
reply_topic=self._topic('bridge/response/group/members/add'),
msg={
'group': group,
'device': device,
}, **self._mqtt_args(**kwargs)))
@action
def group_remove_device(self, group: str, device: Optional[str] = None, **kwargs):
"""
Remove a device from a group.
:param group: Display name of the group.
:param device: Display name of the device to be removed. If none is specified then all the devices registered
to the specified group will be removed.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(
topic=self._topic('bridge/request/group/members/remove{}'.format('_all' if device is None else '')),
reply_topic=self._topic(
'bridge/response/group/members/remove{}'.format('_all' if device is None else '')),
msg={
'group': group,
'device': device,
}, **self._mqtt_args(**kwargs)))
@action
def bind_devices(self, source: str, target: str, **kwargs):
"""
        Bind two devices. Binding makes it possible for devices to control each other directly without the
intervention of zigbee2mqtt or any home automation software. You may want to use this feature to bind
for example an IKEA/Philips Hue dimmer switch to a light bulb, or a Zigbee remote to a thermostat.
Read more on the `zigbee2mqtt binding page <https://www.zigbee2mqtt.io/information/binding.html>`_.
:param source: Name of the source device. It can also be a group name, although the support is
`still experimental <https://www.zigbee2mqtt.io/information/binding.html#binding-a-group>`_.
You can also bind a specific device endpoint - for example ``MySensor/temperature``.
:param target: Name of the target device.
You can also bind a specific device endpoint - for example ``MyLight/state``.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/device/bind'),
reply_topic=self._topic('bridge/response/device/bind'),
msg={'from': source, 'to': target}, **self._mqtt_args(**kwargs)))
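    # Illustrative sketch (not part of the original plugin): binding a hypothetical
    # "Dimmer Switch" directly to a "Kitchen Bulb", so the switch keeps controlling the
    # bulb even when the bridge is offline; a specific endpoint (e.g. 'Kitchen Bulb/state')
    # can be used as well:
    #
    #   get_plugin('zigbee.mqtt').bind_devices('Dimmer Switch', 'Kitchen Bulb')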
@action
def unbind_devices(self, source: str, target: str, **kwargs):
"""
Un-bind two devices.
:param source: Name of the source device.
You can also bind a specific device endpoint - for example ``MySensor/temperature``.
:param target: Name of the target device.
You can also bind a specific device endpoint - for example ``MyLight/state``.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""
return self._parse_response(
self.publish(topic=self._topic('bridge/request/device/unbind'),
reply_topic=self._topic('bridge/response/device/unbind'),
msg={'from': source, 'to': target}, **self._mqtt_args(**kwargs)))
@action
def on(self, device, *args, **kwargs) -> dict:
"""
Implements :meth:`platypush.plugins.switch.plugin.SwitchPlugin.on` and turns on a Zigbee device with a writable
binary property.
"""
switch_info = self._get_switches_info().get(device)
assert switch_info, '{} is not a valid switch'.format(device)
props = self.device_set(device, switch_info['property'], switch_info['value_on']).output
return self._properties_to_switch(device=device, props=props, switch_info=switch_info)
@action
def off(self, device, *args, **kwargs) -> dict:
"""
Implements :meth:`platypush.plugins.switch.plugin.SwitchPlugin.off` and turns off a Zigbee device with a
writable binary property.
"""
switch_info = self._get_switches_info().get(device)
assert switch_info, '{} is not a valid switch'.format(device)
props = self.device_set(device, switch_info['property'], switch_info['value_off']).output
return self._properties_to_switch(device=device, props=props, switch_info=switch_info)
@action
def toggle(self, device, *args, **kwargs) -> dict:
"""
Implements :meth:`platypush.plugins.switch.plugin.SwitchPlugin.toggle` and toggles a Zigbee device with a
writable binary property.
"""
switch_info = self._get_switches_info().get(device)
assert switch_info, '{} is not a valid switch'.format(device)
props = self.device_set(device, switch_info['property'], switch_info['value_toggle']).output
return self._properties_to_switch(device=device, props=props, switch_info=switch_info)
@staticmethod
def _properties_to_switch(device: str, props: dict, switch_info: dict) -> dict:
return {
'on': props[switch_info['property']] == switch_info['value_on'],
'friendly_name': device,
'name': device,
**props,
}
def _get_switches_info(self) -> dict:
def switch_info(device_info: dict) -> dict:
exposes = (device_info.get('definition', {}) or {}).get('exposes', [])
for exposed in exposes:
for feature in exposed.get('features', []):
if feature.get('type') == 'binary' and 'value_on' in feature and 'value_off' in feature and \
feature.get('access', 0) & 2:
return {
'property': feature['property'],
'value_on': feature['value_on'],
'value_off': feature['value_off'],
'value_toggle': feature.get('value_toggle', None),
}
return {}
# noinspection PyUnresolvedReferences
devices = self.devices().output
switches_info = {}
for device in devices:
info = switch_info(device)
if not info:
continue
switches_info[device.get('friendly_name', device.get('ieee_address'))] = info
return switches_info
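    # Illustrative sketch (not part of the original plugin): for a typical bulb exposing
    # a writable binary "state" feature, the map returned above would look like
    #   {'My Bulb': {'property': 'state', 'value_on': 'ON',
    #                'value_off': 'OFF', 'value_toggle': 'TOGGLE'}}
    # (device name and values are hypothetical and depend on the actual hardware).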
@property
def switches(self) -> List[dict]:
"""
Implements the :class:`platypush.plugins.switch.SwitchPlugin.switches` property and returns the state of any
device on the Zigbee network identified as a switch (a device is identified as a switch if it exposes a writable
``state`` property that can be set to ``ON`` or ``OFF``).
"""
switches_info = self._get_switches_info()
# noinspection PyUnresolvedReferences
return [
self._properties_to_switch(device=name, props=switch, switch_info=switches_info[name])
for name, switch in self.devices_get(list(switches_info.keys())).output.items()
]
# vim:sw=4:ts=4:et:
|
dsebm/svhn_utilities.py | leyiweb/Adversarially-Learned-Anomaly-Detection | 125 | 12694623 | # TensorFlow implementation of a DCGAN model for SVHN
import tensorflow as tf
init_kernel = tf.contrib.layers.xavier_initializer()
image_size = 32
learning_rate = 0.003
batch_size = 32
kernel_conv_size = 3
filters_conv = 64
filters_fc = 128
strides_conv = 2
def UnPooling2x2ZeroFilled(x):
# https://github.com/tensorflow/tensorflow/issues/2169
out = tf.concat([x, tf.zeros_like(x)], 3)
out = tf.concat([out, tf.zeros_like(out)], 2)
sh = x.get_shape().as_list()
if None not in sh[1:]:
out_size = [-1, sh[1] * 2, sh[2] * 2, sh[3]]
return tf.reshape(out, out_size)
else:
shv = tf.shape(x)
ret = tf.reshape(out, tf.stack([-1, shv[1] * 2, shv[2] * 2, sh[3]]))
return ret
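# Illustrative shape check (not part of the original module): zero-filled unpooling
# doubles the two spatial dimensions while leaving batch and channels unchanged.
def _example_unpooling_shapes():
    x = tf.zeros([2, 8, 8, filters_conv])
    y = UnPooling2x2ZeroFilled(x)
    # x: [2, 8, 8, 64] -> y: [2, 16, 16, 64]
    return x.get_shape().as_list(), y.get_shape().as_list()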
def network(x_inp, is_training=False, getter=None, reuse=False):
""" Network architecture in tensorflow
Discriminates between real data and generated data
Note:
Provides histogram and distribution tensorflow summaries
Args:
x_inp (tensor): input data for the encoder.
reuse (bool): sharing variables or not
Returns:
logits (tensor): last activation layer of the discriminator (shape 1)
intermediate_layer (tensor): intermediate layer for feature matching
"""
with tf.variable_scope('network', reuse=reuse, custom_getter=getter):
kernel_conv = tf.get_variable('kernel_conv', [kernel_conv_size, kernel_conv_size, 3, filters_conv], initializer=init_kernel)
conv_output_size = int(image_size*image_size/4/strides_conv/strides_conv*filters_conv)
kernel_dense = tf.get_variable('kernel_dense', [conv_output_size, filters_fc], initializer=init_kernel)
bias_dense = tf.get_variable('bias_dense', [filters_fc])
bias_inv_dense = tf.get_variable('bias_inv_dense', [conv_output_size])
x = tf.nn.conv2d(x_inp,
kernel_conv,
[1, strides_conv, strides_conv, 1],
'SAME')
x = tf.nn.softplus(x)
x = tf.nn.pool(x, (2, 2), "MAX", "SAME", strides=(2, 2))
x = tf.contrib.layers.flatten(x)
x = tf.nn.softplus(tf.matmul(x, kernel_dense) + bias_dense)
###INVERSE LAYERS
x = tf.nn.softplus(tf.matmul(x, tf.transpose(kernel_dense)) + bias_inv_dense)
new_image_size = int(image_size/2/strides_conv)
x = tf.reshape(x, [-1, new_image_size, new_image_size, filters_conv])
x = UnPooling2x2ZeroFilled(x)
x = tf.nn.conv2d_transpose(x,
kernel_conv,
tf.shape(x_inp),
[1, strides_conv, strides_conv, 1],
'SAME')
x = tf.nn.softplus(x, name='softplus')
return x
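# Illustrative usage sketch (not part of the original module): builds the reconstruction
# graph and a mean squared reconstruction error. The loss below is only an assumption
# for demonstration purposes; the actual DSEBM training objective is defined elsewhere
# in the repository.
def _example_reconstruction_graph():
    x = tf.placeholder(tf.float32, [None, image_size, image_size, 3], name='x_demo')
    x_rec = network(x, is_training=True)
    rec_error = tf.reduce_mean(tf.square(x - x_rec))
    return x, x_rec, rec_error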
|
adb/systrace/catapult/systrace/systrace/monitor_unittest.py | mohanedmoh/TBS | 2,151 | 12694638 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from systrace import decorators
from systrace import update_systrace_trace_viewer
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STABLE_VIEWER_PATH = os.path.join(SCRIPT_DIR, 'systrace_trace_viewer.html')
# Tests presence and content of static HTML files used not only for Python
# systrace capture, but also Java-based capture in the android SDK tools.
#
# NOTE: changes to this file should typically be accompanied by changes to the
# Android SDK's method of systrace capture.
class MonitorTest(unittest.TestCase):
@decorators.HostOnlyTest
def test_systrace_trace_viewer(self):
self.assertEqual(STABLE_VIEWER_PATH,
update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE)
update_systrace_trace_viewer.update(force_update=True)
with open(STABLE_VIEWER_PATH) as f:
content = f.read().strip()
# expect big html file
self.assertGreater(5 * 1024 * 1024, len(content))
self.assertEqual('<', content[0])
os.remove(f.name)
@decorators.HostOnlyTest
def test_prefix(self):
with open(os.path.join(SCRIPT_DIR, 'prefix.html')) as f:
content = f.read().strip()
self.assertTrue("<html>" in content)
self.assertTrue("<title>Android System Trace</title>" in content)
self.assertTrue("{{SYSTRACE_TRACE_VIEWER_HTML}}" in content)
@decorators.HostOnlyTest
def test_suffix(self):
with open(os.path.join(SCRIPT_DIR, 'suffix.html')) as f:
content = f.read().strip()
self.assertTrue("</html>" in content)
|
mmd_tools/panels/prop_bone.py | lsr123/PX4-loacl_code | 822 | 12694643 | # -*- coding: utf-8 -*-
from bpy.types import Panel
class MMDBonePanel(Panel):
bl_idname = 'BONE_PT_mmd_tools_bone'
bl_label = 'MMD Bone Tools'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'bone'
@classmethod
def poll(cls, context):
        return (context.mode == 'EDIT_ARMATURE' and context.active_bone is not None) or (context.mode == 'POSE' and context.active_pose_bone is not None)
def draw(self, context):
if context.mode == 'EDIT_ARMATURE':
edit_bone = context.active_bone
pose_bone = context.active_object.pose.bones[edit_bone.name]
else:
pose_bone = context.active_pose_bone
layout = self.layout
c = layout.column(align=True)
c.label('Information:')
c.prop(pose_bone.mmd_bone, 'name_j')
c.prop(pose_bone.mmd_bone, 'name_e')
c.label(text='ID: %d'%(pose_bone.mmd_bone.bone_id))
c = layout.column(align=True)
row = c.row()
row.prop(pose_bone.mmd_bone, 'transform_order')
row.prop(pose_bone.mmd_bone, 'transform_after_dynamics')
row.prop(pose_bone.mmd_bone, 'is_visible')
row = c.row()
row.prop(pose_bone.mmd_bone, 'is_controllable')
row.prop(pose_bone.mmd_bone, 'is_tip')
row.prop(pose_bone.mmd_bone, 'enabled_local_axes')
row = c.row()
row.prop(pose_bone.mmd_bone, 'enabled_fixed_axis')
row.prop(pose_bone.mmd_bone, 'use_tail_location')
row = layout.row(align=True)
c = row.column()
c.prop(pose_bone.mmd_bone, 'local_axis_x')
c = row.column()
c.prop(pose_bone.mmd_bone, 'local_axis_z')
c = layout.column()
row = layout.row(align=True)
c = row.column()
c.prop(pose_bone.mmd_bone, 'fixed_axis')
class MMDBoneATPanel(Panel):
bl_idname = 'BONE_PT_mmd_tools_bone_at'
bl_label = 'MMD Additional Transformation'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'bone'
@classmethod
def poll(cls, context):
        return (context.mode == 'EDIT_ARMATURE' and context.active_bone is not None) or (context.mode == 'POSE' and context.active_pose_bone is not None)
def draw(self, context):
if context.mode == 'EDIT_ARMATURE':
edit_bone = context.active_bone
pose_bone = context.active_object.pose.bones[edit_bone.name]
else:
pose_bone = context.active_pose_bone
layout = self.layout
c = layout.column(align=True)
if pose_bone.mmd_bone.is_additional_transform_dirty:
            c.label(text='Changes have not been applied.', icon='ERROR')
row = c.row()
row.prop(pose_bone.mmd_bone, 'has_additional_rotation', text='Rotation')
row.prop(pose_bone.mmd_bone, 'has_additional_location', text='Location')
c = layout.column(align=True)
c.prop_search(pose_bone.mmd_bone, 'additional_transform_bone', pose_bone.id_data.pose, 'bones', icon='BONE_DATA', text='')
# mmd_bone = MMDBone(pose_bone)
# if mmd_bone.has_additional_transform_constraint():
# constraint = mmd_bone.get_additional_transform_constraint()
# c.prop_search(constraint, 'subtarget', constraint.target.pose, 'bones', icon='BONE_DATA', text='Additional Transform Bone')
# else:
# c.operator('mmd_tools.bone_add_additional_transform')
c.prop(pose_bone.mmd_bone, 'additional_transform_influence', text='Influence')
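# Illustrative sketch (not part of the original module): within mmd_tools these panels
# are registered by the add-on itself; for standalone testing they could be registered
# manually, assuming bpy is available:
#
#   import bpy
#   bpy.utils.register_class(MMDBonePanel)
#   bpy.utils.register_class(MMDBoneATPanel)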
|
server/tests-py/test_schema_duplication.py | gh-oss-contributor/graphql-engine-1 | 27,416 | 12694646 | #!/usrbin/env python3
import pytest
from validate import check_query_f
@pytest.mark.usefixtures('per_method_tests_db_state')
class TestSchemaDuplication:
@classmethod
def dir(cls):
return "queries/schema/duplication/"
def test_create_action_followed_by_track_table(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + "create_action_and_track_table_fail.yaml")
def test_track_table_followed_by_create_action(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + "track_table_and_create_action_fail.yaml")
def test_track_table_with_conflicting_custom_root_node_names(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + 'track_table_with_conflicting_custom_root_node_names_fail.yaml')
|
phy/cluster/__init__.py | fjflores/phy | 118 | 12694650 | # -*- coding: utf-8 -*-
# flake8: noqa
"""Manual clustering facilities."""
from ._utils import ClusterMeta, UpdateInfo
from .clustering import Clustering
from .supervisor import Supervisor, ClusterView, SimilarityView
from .views import * # noqa
|
sdk/python/pulumi_azure/iot/time_series_insights_event_source_eventhub.py | henriktao/pulumi-azure | 109 | 12694654 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TimeSeriesInsightsEventSourceEventhubArgs', 'TimeSeriesInsightsEventSourceEventhub']
@pulumi.input_type
class TimeSeriesInsightsEventSourceEventhubArgs:
def __init__(__self__, *,
consumer_group_name: pulumi.Input[str],
environment_id: pulumi.Input[str],
event_source_resource_id: pulumi.Input[str],
eventhub_name: pulumi.Input[str],
namespace_name: pulumi.Input[str],
shared_access_key: pulumi.Input[str],
shared_access_key_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timestamp_property_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a TimeSeriesInsightsEventSourceEventhub resource.
:param pulumi.Input[str] consumer_group_name: Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
        :param pulumi.Input[str] environment_id: Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
:param pulumi.Input[str] event_source_resource_id: Specifies the resource id where events will be coming from.
:param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub which will be associated with this resource.
:param pulumi.Input[str] namespace_name: Specifies the EventHub Namespace name.
:param pulumi.Input[str] shared_access_key: Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
:param pulumi.Input[str] shared_access_key_name: Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] timestamp_property_name: Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""
pulumi.set(__self__, "consumer_group_name", consumer_group_name)
pulumi.set(__self__, "environment_id", environment_id)
pulumi.set(__self__, "event_source_resource_id", event_source_resource_id)
pulumi.set(__self__, "eventhub_name", eventhub_name)
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "shared_access_key", shared_access_key)
pulumi.set(__self__, "shared_access_key_name", shared_access_key_name)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if timestamp_property_name is not None:
pulumi.set(__self__, "timestamp_property_name", timestamp_property_name)
@property
@pulumi.getter(name="consumerGroupName")
def consumer_group_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
"""
return pulumi.get(self, "consumer_group_name")
@consumer_group_name.setter
def consumer_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "consumer_group_name", value)
@property
@pulumi.getter(name="environmentId")
def environment_id(self) -> pulumi.Input[str]:
"""
        Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "environment_id")
@environment_id.setter
def environment_id(self, value: pulumi.Input[str]):
pulumi.set(self, "environment_id", value)
@property
@pulumi.getter(name="eventSourceResourceId")
def event_source_resource_id(self) -> pulumi.Input[str]:
"""
Specifies the resource id where events will be coming from.
"""
return pulumi.get(self, "event_source_resource_id")
@event_source_resource_id.setter
def event_source_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "event_source_resource_id", value)
@property
@pulumi.getter(name="eventhubName")
def eventhub_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the EventHub which will be associated with this resource.
"""
return pulumi.get(self, "eventhub_name")
@eventhub_name.setter
def eventhub_name(self, value: pulumi.Input[str]):
pulumi.set(self, "eventhub_name", value)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
Specifies the EventHub Namespace name.
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="sharedAccessKey")
def shared_access_key(self) -> pulumi.Input[str]:
"""
Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
"""
return pulumi.get(self, "shared_access_key")
@shared_access_key.setter
def shared_access_key(self, value: pulumi.Input[str]):
pulumi.set(self, "shared_access_key", value)
@property
@pulumi.getter(name="sharedAccessKeyName")
def shared_access_key_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
"""
return pulumi.get(self, "shared_access_key_name")
@shared_access_key_name.setter
def shared_access_key_name(self, value: pulumi.Input[str]):
pulumi.set(self, "shared_access_key_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="timestampPropertyName")
def timestamp_property_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""
return pulumi.get(self, "timestamp_property_name")
@timestamp_property_name.setter
def timestamp_property_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timestamp_property_name", value)
@pulumi.input_type
class _TimeSeriesInsightsEventSourceEventhubState:
def __init__(__self__, *,
consumer_group_name: Optional[pulumi.Input[str]] = None,
environment_id: Optional[pulumi.Input[str]] = None,
event_source_resource_id: Optional[pulumi.Input[str]] = None,
eventhub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
shared_access_key: Optional[pulumi.Input[str]] = None,
shared_access_key_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timestamp_property_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering TimeSeriesInsightsEventSourceEventhub resources.
:param pulumi.Input[str] consumer_group_name: Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
        :param pulumi.Input[str] environment_id: Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
:param pulumi.Input[str] event_source_resource_id: Specifies the resource id where events will be coming from.
:param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub which will be associated with this resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[str] namespace_name: Specifies the EventHub Namespace name.
:param pulumi.Input[str] shared_access_key: Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
:param pulumi.Input[str] shared_access_key_name: Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] timestamp_property_name: Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""
if consumer_group_name is not None:
pulumi.set(__self__, "consumer_group_name", consumer_group_name)
if environment_id is not None:
pulumi.set(__self__, "environment_id", environment_id)
if event_source_resource_id is not None:
pulumi.set(__self__, "event_source_resource_id", event_source_resource_id)
if eventhub_name is not None:
pulumi.set(__self__, "eventhub_name", eventhub_name)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace_name is not None:
pulumi.set(__self__, "namespace_name", namespace_name)
if shared_access_key is not None:
pulumi.set(__self__, "shared_access_key", shared_access_key)
if shared_access_key_name is not None:
pulumi.set(__self__, "shared_access_key_name", shared_access_key_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if timestamp_property_name is not None:
pulumi.set(__self__, "timestamp_property_name", timestamp_property_name)
@property
@pulumi.getter(name="consumerGroupName")
def consumer_group_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
"""
return pulumi.get(self, "consumer_group_name")
@consumer_group_name.setter
def consumer_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "consumer_group_name", value)
@property
@pulumi.getter(name="environmentId")
def environment_id(self) -> Optional[pulumi.Input[str]]:
"""
        Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "environment_id")
@environment_id.setter
def environment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "environment_id", value)
@property
@pulumi.getter(name="eventSourceResourceId")
def event_source_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the resource id where events will be coming from.
"""
return pulumi.get(self, "event_source_resource_id")
@event_source_resource_id.setter
def event_source_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_source_resource_id", value)
@property
@pulumi.getter(name="eventhubName")
def eventhub_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the EventHub which will be associated with this resource.
"""
return pulumi.get(self, "eventhub_name")
@eventhub_name.setter
def eventhub_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eventhub_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the EventHub Namespace name.
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="sharedAccessKey")
def shared_access_key(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
"""
return pulumi.get(self, "shared_access_key")
@shared_access_key.setter
def shared_access_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "shared_access_key", value)
@property
@pulumi.getter(name="sharedAccessKeyName")
def shared_access_key_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
"""
return pulumi.get(self, "shared_access_key_name")
@shared_access_key_name.setter
def shared_access_key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "shared_access_key_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="timestampPropertyName")
def timestamp_property_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""
return pulumi.get(self, "timestamp_property_name")
@timestamp_property_name.setter
def timestamp_property_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timestamp_property_name", value)
class TimeSeriesInsightsEventSourceEventhub(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
consumer_group_name: Optional[pulumi.Input[str]] = None,
environment_id: Optional[pulumi.Input[str]] = None,
event_source_resource_id: Optional[pulumi.Input[str]] = None,
eventhub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
shared_access_key: Optional[pulumi.Input[str]] = None,
shared_access_key_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timestamp_property_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an Azure IoT Time Series Insights EventHub Event Source.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_event_hub_namespace = azure.eventhub.EventHubNamespace("exampleEventHubNamespace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard")
example_event_hub = azure.eventhub.EventHub("exampleEventHub",
namespace_name=example_event_hub_namespace.name,
resource_group_name=example_resource_group.name,
partition_count=2,
message_retention=7)
example_consumer_group = azure.eventhub.ConsumerGroup("exampleConsumerGroup",
namespace_name=example_event_hub_namespace.name,
eventhub_name=example_event_hub.name,
resource_group_name=example_resource_group.name)
example_authorization_rule = azure.eventhub.AuthorizationRule("exampleAuthorizationRule",
namespace_name=example_event_hub_namespace.name,
eventhub_name=example_event_hub.name,
resource_group_name=example_resource_group.name,
listen=True,
send=False,
manage=False)
example_account = azure.storage.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
account_tier="Standard",
account_replication_type="LRS")
example_time_series_insights_gen2_environment = azure.iot.TimeSeriesInsightsGen2Environment("exampleTimeSeriesInsightsGen2Environment",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="L1",
id_properties=["id"],
storage=azure.iot.TimeSeriesInsightsGen2EnvironmentStorageArgs(
name=example_account.name,
key=example_account.primary_access_key,
))
example_time_series_insights_event_source_eventhub = azure.iot.TimeSeriesInsightsEventSourceEventhub("exampleTimeSeriesInsightsEventSourceEventhub",
location=example_resource_group.location,
environment_id=example_time_series_insights_gen2_environment.id,
eventhub_name=example_event_hub.name,
namespace_name=example_event_hub_namespace.name,
shared_access_key=example_authorization_rule.primary_key,
shared_access_key_name=example_authorization_rule.name,
consumer_group_name=example_consumer_group.name,
event_source_resource_id=example_event_hub.id)
```
## Import
Azure IoT Time Series Insights EventHub Event Source can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:iot/timeSeriesInsightsEventSourceEventhub:TimeSeriesInsightsEventSourceEventhub example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.TimeSeriesInsights/environments/environment1/eventSources/example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] consumer_group_name: Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
        :param pulumi.Input[str] environment_id: Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
:param pulumi.Input[str] event_source_resource_id: Specifies the resource id where events will be coming from.
:param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub which will be associated with this resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[str] namespace_name: Specifies the EventHub Namespace name.
:param pulumi.Input[str] shared_access_key: Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
:param pulumi.Input[str] shared_access_key_name: Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] timestamp_property_name: Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TimeSeriesInsightsEventSourceEventhubArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Azure IoT Time Series Insights EventHub Event Source.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_event_hub_namespace = azure.eventhub.EventHubNamespace("exampleEventHubNamespace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard")
example_event_hub = azure.eventhub.EventHub("exampleEventHub",
namespace_name=example_event_hub_namespace.name,
resource_group_name=example_resource_group.name,
partition_count=2,
message_retention=7)
example_consumer_group = azure.eventhub.ConsumerGroup("exampleConsumerGroup",
namespace_name=example_event_hub_namespace.name,
eventhub_name=example_event_hub.name,
resource_group_name=example_resource_group.name)
example_authorization_rule = azure.eventhub.AuthorizationRule("exampleAuthorizationRule",
namespace_name=example_event_hub_namespace.name,
eventhub_name=example_event_hub.name,
resource_group_name=example_resource_group.name,
listen=True,
send=False,
manage=False)
example_account = azure.storage.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
account_tier="Standard",
account_replication_type="LRS")
example_time_series_insights_gen2_environment = azure.iot.TimeSeriesInsightsGen2Environment("exampleTimeSeriesInsightsGen2Environment",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="L1",
id_properties=["id"],
storage=azure.iot.TimeSeriesInsightsGen2EnvironmentStorageArgs(
name=example_account.name,
key=example_account.primary_access_key,
))
example_time_series_insights_event_source_eventhub = azure.iot.TimeSeriesInsightsEventSourceEventhub("exampleTimeSeriesInsightsEventSourceEventhub",
location=example_resource_group.location,
environment_id=example_time_series_insights_gen2_environment.id,
eventhub_name=example_event_hub.name,
namespace_name=example_event_hub_namespace.name,
shared_access_key=example_authorization_rule.primary_key,
shared_access_key_name=example_authorization_rule.name,
consumer_group_name=example_consumer_group.name,
event_source_resource_id=example_event_hub.id)
```
## Import
Azure IoT Time Series Insights EventHub Event Source can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:iot/timeSeriesInsightsEventSourceEventhub:TimeSeriesInsightsEventSourceEventhub example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.TimeSeriesInsights/environments/environment1/eventSources/example
```
:param str resource_name: The name of the resource.
:param TimeSeriesInsightsEventSourceEventhubArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TimeSeriesInsightsEventSourceEventhubArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
consumer_group_name: Optional[pulumi.Input[str]] = None,
environment_id: Optional[pulumi.Input[str]] = None,
event_source_resource_id: Optional[pulumi.Input[str]] = None,
eventhub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
shared_access_key: Optional[pulumi.Input[str]] = None,
shared_access_key_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timestamp_property_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TimeSeriesInsightsEventSourceEventhubArgs.__new__(TimeSeriesInsightsEventSourceEventhubArgs)
if consumer_group_name is None and not opts.urn:
raise TypeError("Missing required property 'consumer_group_name'")
__props__.__dict__["consumer_group_name"] = consumer_group_name
if environment_id is None and not opts.urn:
raise TypeError("Missing required property 'environment_id'")
__props__.__dict__["environment_id"] = environment_id
if event_source_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'event_source_resource_id'")
__props__.__dict__["event_source_resource_id"] = event_source_resource_id
if eventhub_name is None and not opts.urn:
raise TypeError("Missing required property 'eventhub_name'")
__props__.__dict__["eventhub_name"] = eventhub_name
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__.__dict__["namespace_name"] = namespace_name
if shared_access_key is None and not opts.urn:
raise TypeError("Missing required property 'shared_access_key'")
__props__.__dict__["shared_access_key"] = shared_access_key
if shared_access_key_name is None and not opts.urn:
raise TypeError("Missing required property 'shared_access_key_name'")
__props__.__dict__["shared_access_key_name"] = shared_access_key_name
__props__.__dict__["tags"] = tags
__props__.__dict__["timestamp_property_name"] = timestamp_property_name
super(TimeSeriesInsightsEventSourceEventhub, __self__).__init__(
'azure:iot/timeSeriesInsightsEventSourceEventhub:TimeSeriesInsightsEventSourceEventhub',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
consumer_group_name: Optional[pulumi.Input[str]] = None,
environment_id: Optional[pulumi.Input[str]] = None,
event_source_resource_id: Optional[pulumi.Input[str]] = None,
eventhub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
shared_access_key: Optional[pulumi.Input[str]] = None,
shared_access_key_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timestamp_property_name: Optional[pulumi.Input[str]] = None) -> 'TimeSeriesInsightsEventSourceEventhub':
"""
Get an existing TimeSeriesInsightsEventSourceEventhub resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] consumer_group_name: Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
:param pulumi.Input[str] environment_id: Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to created.
:param pulumi.Input[str] event_source_resource_id: Specifies the resource id where events will be coming from.
:param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub which will be associated with this resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[str] namespace_name: Specifies the EventHub Namespace name.
:param pulumi.Input[str] shared_access_key: Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
:param pulumi.Input[str] shared_access_key_name: Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] timestamp_property_name: Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TimeSeriesInsightsEventSourceEventhubState.__new__(_TimeSeriesInsightsEventSourceEventhubState)
__props__.__dict__["consumer_group_name"] = consumer_group_name
__props__.__dict__["environment_id"] = environment_id
__props__.__dict__["event_source_resource_id"] = event_source_resource_id
__props__.__dict__["eventhub_name"] = eventhub_name
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["namespace_name"] = namespace_name
__props__.__dict__["shared_access_key"] = shared_access_key
__props__.__dict__["shared_access_key_name"] = shared_access_key_name
__props__.__dict__["tags"] = tags
__props__.__dict__["timestamp_property_name"] = timestamp_property_name
return TimeSeriesInsightsEventSourceEventhub(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="consumerGroupName")
def consumer_group_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
"""
return pulumi.get(self, "consumer_group_name")
@property
@pulumi.getter(name="environmentId")
def environment_id(self) -> pulumi.Output[str]:
"""
Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to created.
"""
return pulumi.get(self, "environment_id")
@property
@pulumi.getter(name="eventSourceResourceId")
def event_source_resource_id(self) -> pulumi.Output[str]:
"""
Specifies the resource id where events will be coming from.
"""
return pulumi.get(self, "event_source_resource_id")
@property
@pulumi.getter(name="eventhubName")
def eventhub_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the EventHub which will be associated with this resource.
"""
return pulumi.get(self, "eventhub_name")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Output[str]:
"""
Specifies the EventHub Namespace name.
"""
return pulumi.get(self, "namespace_name")
@property
@pulumi.getter(name="sharedAccessKey")
def shared_access_key(self) -> pulumi.Output[str]:
"""
Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
"""
return pulumi.get(self, "shared_access_key")
@property
@pulumi.getter(name="sharedAccessKeyName")
def shared_access_key_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
"""
return pulumi.get(self, "shared_access_key_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timestampPropertyName")
def timestamp_property_name(self) -> pulumi.Output[str]:
"""
Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""
return pulumi.get(self, "timestamp_property_name")
|
examples/Cuda/ReconstructionSystem/profile/scripts/draw_mc_time.py | devshank3/Open3D | 113 | 12694660 | import matplotlib.pyplot as plt
import numpy as np
def draw_text(ax, x, y):
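    # Annotate each bar with its value, shifting the label left in proportion
    # to its length so the text stays roughly centred over the bar.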
max_y = np.max(y)
for i, v in enumerate(y):
text = str(y[i])
ax.text(x[i] - 0.045 * len(text), y[i],
r'\textbf{' + text + '}')
def draw_error_bar(ax, x, xticks, y_gpu, e_gpu, title, ylabel):
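    # Draw the GPU timings as a bar chart with error bars, then set the tick
    # labels, panel title, and y-axis label for this panel.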
offset = 0.2
width = offset * 2
bar_gpu = ax.bar(x, y_gpu, width=width, color=(0.86, 0.27, 0.22))
    ax.errorbar(x, y_gpu, yerr=e_gpu, fmt='.', color=(0.96, 0.71, 0), capsize=10)
ax.yaxis.grid(True)
ax.set_xticks(x)
ax.set_xticklabels(xticks)
ax.set_title(title)
ax.set_ylabel(ylabel)
if __name__ == '__main__':
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig, axs = plt.subplots(figsize=(5, 3))
x = np.array([1, 2, 3, 4, 5])
labels = [r'\textit{fr2\_desktop}', r'\textit{fr3\_household}',
r'\textit{lounge}', r'\textit{copyroom}',
r'\textit{livingroom1}']
# mean = 8.713374, std = 1.637024
y_gpu_mc = np.array([8.71, 11.63, 6.28, 5.18, 4.07])
e_gpu_mc = np.array([1.64, 3.25, 1.98, 1.94, 1.15])
draw_error_bar(axs, x, labels,
y_gpu_mc, e_gpu_mc,
r'\textbf{Marching Cubes for Voxels in Frustum}',
r'\textbf{Average time per frame} (ms)')
fig.tight_layout()
# bar_cpu_odom = plt.bar(x + offset, y_cpu_odom, width=width)
# plt.errorbar(x + offset, y_cpu_odom, yerr=e_cpu_odom, fmt='.g', capsize=20)
# for i, v in enumerate(y_cpu_odom):
# plt.text(x[i] + offset + 0.02, y_cpu_odom[i] + 5, str(y_cpu_odom[i]))
plt.savefig('mc_time.pdf', bbox_inches='tight')
plt.show()
|
cookbook/gravmag_transform_rtp.py | XuesongDing/fatiando | 179 | 12694664 | <gh_stars>100-1000
"""
GravMag: Reduction to the pole of a total field anomaly using FFT
"""
from fatiando import mesher, gridder, utils
from fatiando.gravmag import prism, transform
from fatiando.vis import mpl
# Direction of the Geomagnetic field
inc, dec = -60, 0
# Make a model with only induced magnetization
model = [mesher.Prism(-100, 100, -100, 100, 0, 2000,
{'magnetization': utils.ang2vec(10, inc, dec)})]
area = (-5000, 5000, -5000, 5000)
shape = (100, 100)
z0 = -500
x, y, z = gridder.regular(area, shape, z=z0)
tf = utils.contaminate(prism.tf(x, y, z, model, inc, dec),
1, seed=0)
# Reduce to the pole using FFT. Since there is only induced magnetization, the
# magnetization direction (sinc and sdec) is the same as the geomagnetic field
pole = transform.reduce_to_pole(x, y, tf, shape, inc, dec, sinc=inc, sdec=dec)
# Calculate the true value at the pole for comparison
true = prism.tf(x, y, z, model, 90, 0, pmag=utils.ang2vec(10, 90, 0))
fig, axes = mpl.subplots(1, 3, figsize=(14, 4))
for ax in axes:
ax.set_aspect('equal')
mpl.sca(axes[0])
mpl.title("Original total field anomaly")
mpl.contourf(y, x, tf, shape, 30, cmap=mpl.cm.RdBu_r)
mpl.colorbar(pad=0).set_label('nT')
mpl.m2km()
mpl.sca(axes[1])
mpl.title("True value at pole")
mpl.contourf(y, x, true, shape, 30, cmap=mpl.cm.RdBu_r)
mpl.colorbar(pad=0).set_label('nT')
mpl.m2km()
mpl.sca(axes[2])
mpl.title("Reduced to the pole")
mpl.contourf(y, x, pole, shape, 30, cmap=mpl.cm.RdBu_r)
mpl.colorbar(pad=0).set_label('nT')
mpl.m2km()
mpl.tight_layout()
mpl.show()
|
legacy/backends/processing/processing_base_backend.py | ParikhKadam/zenml | 1,275 | 12694670 | # Copyright (c) ZenML GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Definition of the data Processing Backend"""
from typing import Optional, List, Text
from zenml.backends import BaseBackend
class ProcessingBaseBackend(BaseBackend):
"""
Use this class to run a ZenML pipeline locally.
Every ZenML pipeline runs in backends.
A dedicated processing backend can be used to efficiently process large
amounts of incoming data in parallel, potentially distributed across
multiple machines. This can happen on local processing backends as well
as cloud-based variants like Google Cloud Dataflow. More powerful machines
with higher core counts and clock speeds can be leveraged to increase
processing throughput significantly.
"""
BACKEND_TYPE = 'processing'
def get_beam_args(self,
pipeline_name: Text = None,
pipeline_root: Text = None) -> \
Optional[List[Text]]:
"""
Returns a list of beam args for the pipeline.
Args:
pipeline_name: Name of the pipeline.
pipeline_root: Root dir of pipeline.
"""
# TODO: [LOW] Check if multiprocessing slows pipeline down or not.
return [
# '--direct_running_mode=multi_processing',
# # 0 means auto-detect based on on the number of CPUs available
# # during execution time.
# '--direct_num_workers=0',
]
|
build/util/lastchange.py | zealoussnow/chromium | 14,668 | 12694693 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
lastchange.py -- Chromium revision fetching utility.
"""
from __future__ import print_function
import argparse
import collections
import datetime
import logging
import os
import subprocess
import sys
VersionInfo = collections.namedtuple("VersionInfo",
("revision_id", "revision", "timestamp"))
class GitError(Exception):
pass
# This function exists for compatibility with logic outside this
# repository that uses this file as a library.
# TODO(eliribble) remove this function after it has been ported into
# the repositories that depend on it
def RunGitCommand(directory, command):
"""
Launches git subcommand.
Errors are swallowed.
Returns:
A process object or None.
"""
command = ['git'] + command
# Force shell usage under cygwin. This is a workaround for
# mysterious loss of cwd while invoking cygwin's git.
# We can't just pass shell=True to Popen, as under win32 this will
# cause CMD to be used, while we explicitly want a cygwin shell.
if sys.platform == 'cygwin':
command = ['sh', '-c', ' '.join(command)]
try:
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=directory,
shell=(sys.platform=='win32'))
return proc
except OSError as e:
logging.error('Command %r failed: %s' % (' '.join(command), e))
return None
def _RunGitCommand(directory, command):
"""Launches git subcommand.
Returns:
The stripped stdout of the git command.
Raises:
GitError on failure, including a nonzero return code.
"""
command = ['git'] + command
# Force shell usage under cygwin. This is a workaround for
# mysterious loss of cwd while invoking cygwin's git.
# We can't just pass shell=True to Popen, as under win32 this will
# cause CMD to be used, while we explicitly want a cygwin shell.
if sys.platform == 'cygwin':
command = ['sh', '-c', ' '.join(command)]
try:
logging.info("Executing '%s' in %s", ' '.join(command), directory)
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=directory,
shell=(sys.platform=='win32'))
stdout, stderr = tuple(x.decode(encoding='utf_8')
for x in proc.communicate())
stdout = stdout.strip()
logging.debug("returncode: %d", proc.returncode)
logging.debug("stdout: %s", stdout)
logging.debug("stderr: %s", stderr)
if proc.returncode != 0 or not stdout:
raise GitError((
"Git command '{}' in {} failed: "
"rc={}, stdout='{}' stderr='{}'").format(
" ".join(command), directory, proc.returncode, stdout, stderr))
return stdout
except OSError as e:
raise GitError("Git command 'git {}' in {} failed: {}".format(
" ".join(command), directory, e))
def GetMergeBase(directory, ref):
"""
Return the merge-base of HEAD and ref.
Args:
directory: The directory containing the .git directory.
ref: The ref to use to find the merge base.
Returns:
The git commit SHA of the merge-base as a string.
"""
logging.debug("Calculating merge base between HEAD and %s in %s",
ref, directory)
command = ['merge-base', 'HEAD', ref]
return _RunGitCommand(directory, command)
def FetchGitRevision(directory, commit_filter, start_commit="HEAD"):
"""
Fetch the Git hash (and Cr-Commit-Position if any) for a given directory.
Args:
directory: The directory containing the .git directory.
commit_filter: A filter to supply to grep to filter commits
start_commit: A commit identifier. The result of this function
will be limited to only consider commits before the provided
commit.
Returns:
A VersionInfo object. On error all values will be 0.
"""
hash_ = ''
git_args = ['log', '-1', '--format=%H %ct']
if commit_filter is not None:
git_args.append('--grep=' + commit_filter)
git_args.append(start_commit)
output = _RunGitCommand(directory, git_args)
hash_, commit_timestamp = output.split()
if not hash_:
return VersionInfo('0', '0', 0)
revision = hash_
output = _RunGitCommand(directory, ['cat-file', 'commit', hash_])
for line in reversed(output.splitlines()):
if line.startswith('Cr-Commit-Position:'):
pos = line.rsplit()[-1].strip()
logging.debug("Found Cr-Commit-Position '%s'", pos)
revision = "{}-{}".format(hash_, pos)
break
return VersionInfo(hash_, revision, int(commit_timestamp))
def GetHeaderGuard(path):
"""
Returns the header #define guard for the given file path.
This treats everything after the last instance of "src/" as being a
relevant part of the guard. If there is no "src/", then the entire path
is used.
"""
src_index = path.rfind('src/')
if src_index != -1:
guard = path[src_index + 4:]
else:
guard = path
guard = guard.upper()
return guard.replace('/', '_').replace('.', '_').replace('\\', '_') + '_'
def GetHeaderContents(path, define, version):
"""
Returns what the contents of the header file should be that indicate the given
revision.
"""
header_guard = GetHeaderGuard(path)
header_contents = """/* Generated by lastchange.py, do not edit.*/
#ifndef %(header_guard)s
#define %(header_guard)s
#define %(define)s "%(version)s"
#endif // %(header_guard)s
"""
header_contents = header_contents % { 'header_guard': header_guard,
'define': define,
'version': version }
return header_contents
def GetGitTopDirectory(source_dir):
"""Get the top git directory - the directory that contains the .git directory.
Args:
source_dir: The directory to search.
Returns:
The output of "git rev-parse --show-toplevel" as a string
"""
return _RunGitCommand(source_dir, ['rev-parse', '--show-toplevel'])
def WriteIfChanged(file_name, contents):
"""
Writes the specified contents to the specified file_name
iff the contents are different than the current contents.
Returns if new data was written.
"""
try:
old_contents = open(file_name, 'r').read()
except EnvironmentError:
pass
else:
if contents == old_contents:
return False
os.unlink(file_name)
open(file_name, 'w').write(contents)
return True
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(usage="lastchange.py [options]")
parser.add_argument("-m", "--version-macro",
help=("Name of C #define when using --header. Defaults to "
"LAST_CHANGE."))
parser.add_argument("-o", "--output", metavar="FILE",
help=("Write last change to FILE. "
"Can be combined with --header to write both files."))
parser.add_argument("--header", metavar="FILE",
help=("Write last change to FILE as a C/C++ header. "
"Can be combined with --output to write both files."))
parser.add_argument("--merge-base-ref",
default=None,
help=("Only consider changes since the merge "
"base between HEAD and the provided ref"))
parser.add_argument("--revision-id-only", action='store_true',
help=("Output the revision as a VCS revision ID only (in "
"Git, a 40-character commit hash, excluding the "
"Cr-Commit-Position)."))
parser.add_argument("--revision-id-prefix",
metavar="PREFIX",
help=("Adds a string prefix to the VCS revision ID."))
parser.add_argument("--print-only", action="store_true",
help=("Just print the revision string. Overrides any "
"file-output-related options."))
parser.add_argument("-s", "--source-dir", metavar="DIR",
help="Use repository in the given directory.")
parser.add_argument("--filter", metavar="REGEX",
help=("Only use log entries where the commit message "
"matches the supplied filter regex. Defaults to "
"'^Change-Id:' to suppress local commits."),
default='^Change-Id:')
args, extras = parser.parse_known_args(argv[1:])
logging.basicConfig(level=logging.WARNING)
out_file = args.output
header = args.header
commit_filter=args.filter
while len(extras) and out_file is None:
if out_file is None:
out_file = extras.pop(0)
if extras:
sys.stderr.write('Unexpected arguments: %r\n\n' % extras)
parser.print_help()
sys.exit(2)
source_dir = args.source_dir or os.path.dirname(os.path.abspath(__file__))
try:
git_top_dir = GetGitTopDirectory(source_dir)
except GitError as e:
logging.error("Failed to get git top directory from '%s': %s",
source_dir, e)
return 2
if args.merge_base_ref:
try:
merge_base_sha = GetMergeBase(git_top_dir, args.merge_base_ref)
except GitError as e:
logging.error("You requested a --merge-base-ref value of '%s' but no "
"merge base could be found between it and HEAD. Git "
"reports: %s", args.merge_base_ref, e)
return 3
else:
merge_base_sha = 'HEAD'
try:
version_info = FetchGitRevision(git_top_dir, commit_filter, merge_base_sha)
except GitError as e:
logging.error("Failed to get version info: %s", e)
logging.info(("Falling back to a version of 0.0.0 to allow script to "
"finish. This is normal if you are bootstrapping a new environment "
"or do not have a git repository for any other reason. If not, this "
"could represent a serious error."))
version_info = VersionInfo('0', '0', 0)
revision_string = version_info.revision
if args.revision_id_only:
revision_string = version_info.revision_id
if args.revision_id_prefix:
revision_string = args.revision_id_prefix + revision_string
if args.print_only:
print(revision_string)
else:
lastchange_year = datetime.datetime.utcfromtimestamp(
version_info.timestamp).year
contents_lines = [
"LASTCHANGE=%s" % revision_string,
"LASTCHANGE_YEAR=%s" % lastchange_year,
]
contents = '\n'.join(contents_lines) + '\n'
if not out_file and not args.header:
sys.stdout.write(contents)
else:
if out_file:
committime_file = out_file + '.committime'
out_changed = WriteIfChanged(out_file, contents)
if out_changed or not os.path.exists(committime_file):
with open(committime_file, 'w') as timefile:
timefile.write(str(version_info.timestamp))
if header:
WriteIfChanged(header,
GetHeaderContents(header, args.version_macro,
revision_string))
return 0
if __name__ == '__main__':
sys.exit(main())
|
Testing/elx_compare_overlap.py | eliseemond/elastix | 318 | 12694700 | <reponame>eliseemond/elastix
import sys, subprocess
import os
import os.path
import shutil
import re
import glob
from optparse import OptionParser
#-------------------------------------------------------------------------------
# the main function
# Below we deform the moving image segmentation by the current result as well as
# by a previous stored result. This makes this test a regression test.
#
# We could instead compare with a fixed image segmentation, but that would require
# the tested registrations to be relatively good, which they are not to save time.
def main():
# usage, parse parameters
usage = "usage: %prog [options] arg";
parser = OptionParser( usage );
# option to debug and verbose
parser.add_option( "-v", "--verbose", action="store_true", dest="verbose" );
# options to control files
parser.add_option( "-d", "--directory", dest="directory", help="elastix output directory" );
parser.add_option( "-m", "--movingsegmentation", dest="mseg", help="moving image segmentation" );
parser.add_option( "-b", "--baselinetp", dest="btp", help="baseline transform parameter file" );
parser.add_option( "-p", "--path", dest="path", help="path where executables can be found" );
(options, args) = parser.parse_args();
# Check if option -d and -m and -b are given
if options.directory == None :
parser.error( "The option directory (-d) should be given" );
if options.mseg == None :
parser.error( "The option directory (-m) should be given" );
if options.btp == None :
parser.error( "The option directory (-b) should be given" );
# Get the transform parameters files
tpFileName_in = os.path.join( options.directory, "TransformParameters.0.txt" );
tpFileName = os.path.join( options.directory, "TransformParameters.seg.txt" );
tpFileName_b_in = options.btp;
tpFileName_b = os.path.join( options.directory, "TransformParameters.baseline.seg.txt" );
# Sanity checks
if not os.path.exists( tpFileName_in ) :
print( "ERROR: the file " + tpFileName_in + " does not exist" );
return 1;
# Below we use programs that are compiled with elastix, and are thus available
# in the binary directory. The user of this script has to supply the path
# to the binary directory via the command line.
# In order to make sure that python is able to find these programs we add
# the paths to the local environment.
_path = os.path.dirname( options.path );
_path += os.pathsep + os.getenv('PATH');
os.environ['PATH'] = _path;
#
# Deform the moving image segmentation by the current result
#
print( "Deforming moving image segmentation using " + tpFileName_in );
# Make the transform parameters file suitable for binary images
f1 = open( tpFileName_in, 'r' ); f2 = open( tpFileName, 'w' );
for line in f1 :
lineout = line.replace( '(FinalBSplineInterpolationOrder 3)', '(FinalBSplineInterpolationOrder 0)' );
lineout = re.sub( "(ResultImageFormat \"mhd\")", "ResultImageFormat \"mha\"", lineout );
lineout = re.sub( "(ResultImagePixelType \"short\")", "ResultImagePixelType \"unsigned char\"", lineout );
lineout = re.sub( "(CompressResultImage \"false\")", "CompressResultImage \"true\"", lineout );
f2.write( lineout );
f1.close(); f2.close();
# Transform the moving image segmentation to mimick the baseline result
seg = os.path.join( options.directory, "result.mha" );
seg_defm = os.path.join( options.directory, "segmentation_deformed.mha" );
subprocess.call( [ "transformix", "-in", options.mseg, "-out", options.directory, "-tp", tpFileName ],
stdout=subprocess.PIPE );
if( os.path.exists( seg_defm ) ) : os.remove( seg_defm );
shutil.move( seg, seg_defm );
#
# Deform the moving image segmentation by the baseline result
#
print( "Deforming moving image segmentation using " + tpFileName_b_in );
# Make the transform parameters file suitable for binary images
f1 = open( tpFileName_b_in, 'r' ); f2 = open( tpFileName_b, 'w' );
for line in f1 :
lineout = line.replace( '(FinalBSplineInterpolationOrder 3)', '(FinalBSplineInterpolationOrder 0)' );
lineout = re.sub( "(ResultImageFormat \"mhd\")", "ResultImageFormat \"mha\"", lineout );
lineout = re.sub( "(ResultImagePixelType \"short\")", "ResultImagePixelType \"unsigned char\"", lineout );
lineout = re.sub( "(CompressResultImage \"false\")", "CompressResultImage \"true\"", lineout );
f2.write( lineout );
f1.close(); f2.close();
# Transform the moving image segmentation to mimick the fixed image segmentation
seg_defb = os.path.join( options.directory, "segmentation_baseline.mha" );
subprocess.call( [ "transformix", "-in", options.mseg, "-out", options.directory, "-tp", tpFileName_b ],
stdout=subprocess.PIPE );
if( os.path.exists( seg_defb ) ) : os.remove( seg_defb );
shutil.move( seg, seg_defb );
# Compute the overlap between baseline segmentation and deformed moving segmentation
try :
# This will work from python 2.7 on
outputAsString = subprocess.check_output( [ "elxComputeOverlap", "-in", seg_defm, seg_defb ] ).decode("utf-8");
except :
# Workaround for python 2.6 and lower. For MacMini specifically.
outputAsString = subprocess.Popen( [ "elxComputeOverlap", "-in", seg_defm, seg_defb ], stdout=subprocess.PIPE ).communicate()[0].decode("utf-8");
overlap = outputAsString[ outputAsString.find( "Overlap" ) : ].strip( "Overlap: " );
# Report
print( "The segmentation overlap between current and baseline is " + overlap );
if float( overlap ) > 0.99 :
print( "SUCCESS: overlap is higher than 0.99" );
return 0;
else :
print( "FAILURE: overlap is lower than 0.99" );
return 1;
#-------------------------------------------------------------------------------
if __name__ == '__main__':
sys.exit(main())
|
plenum/test/metrics/helper.py | andkononykhin/plenum | 148 | 12694710 | <reponame>andkononykhin/plenum
from datetime import datetime, timedelta
from numbers import Number
from random import choice, uniform, gauss, random, randint
from typing import List, Union
from plenum.common.metrics_collector import MetricsName, MetricsEvent, MetricsCollector
from plenum.common.value_accumulator import ValueAccumulator
def gen_metrics_name() -> MetricsName:
return choice(list(MetricsName))
def gen_next_timestamp(prev=None) -> datetime:
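    # Round timestamps to millisecond precision; when a previous timestamp is
    # given, advance it by a random 1ms-10s step so generated events stay ordered.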
def round_ts(ts: datetime) -> datetime:
us = round(ts.microsecond - 500, -3)
return ts.replace(microsecond=us)
if prev is None:
return round_ts(datetime.utcnow())
return round_ts(prev + timedelta(seconds=uniform(0.001, 10.0)))
def generate_events(num: int, min_ts=None) -> List[MetricsEvent]:
ts = gen_next_timestamp(min_ts)
result = []
for _ in range(num):
ts = gen_next_timestamp(ts)
name = gen_metrics_name()
if random() > 0.5:
value = gauss(0.0, 100.0)
else:
value = ValueAccumulator([gauss(0.0, 100.0) for _ in range(randint(2, 5))])
result += [MetricsEvent(ts, name, value)]
return result
class MockEvent:
def __init__(self, name, count, sum):
self.name = name
self.count = count
self.sum = sum
def __eq__(self, other):
if not isinstance(other, MockEvent):
return False
if self.name != other.name:
return False
if self.count != other.count:
return False
return self.sum == other.sum
@property
def avg(self):
return self.sum / self.count
class MockMetricsCollector(MetricsCollector):
def __init__(self):
super().__init__()
self.events = []
def store_event(self, name: MetricsName, value: Union[Number, ValueAccumulator]):
if isinstance(value, Number):
self.events.append(MockEvent(name, 1, value))
else:
self.events.append(MockEvent(name, value.count, value.sum))
|
src/0485.max-consecutive-ones/max-consecutive-ones.py | lyphui/Just-Code | 782 | 12694737 | <gh_stars>100-1000
class Solution:
def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
res = cnt = 0
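        # cnt is the length of the current run of consecutive 1s; res keeps the
        # longest run seen so far and is refreshed whenever a 0 ends a run.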
for i in nums:
if i:
cnt += 1
else:
if cnt:
res = max(res, cnt)
cnt = 0
return max(res, cnt) |
albu/src/carvana_eval.py | chritter/kaggle_carvana_segmentation | 447 | 12694747 | <filename>albu/src/carvana_eval.py
import os
import cv2
import numpy as np
from utils import get_config, get_csv_folds
from dataset.h5like_interface import H5LikeFileInterface
from eval import Evaluator, flip
from dataset.carvana_dataset import CarvanaDataset
class FullImageEvaluator(Evaluator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def process_data(self, predicted, model, data, prefix=""):
names, samples, masks = self.get_data(data)
for i in range(len(names)):
self.prev_name = names[i]
self.full_pred = np.squeeze(predicted[i,...])
if samples is not None:
self.full_image = (samples[i,...] * 255).astype(np.uint8)
if masks is not None:
self.full_mask = (np.squeeze(masks[i,...]) * 255).astype(np.uint8)
self.on_image_constructed(prefix)
def save(self, name, prefix=""):
cv2.imwrite(os.path.join(self.config.results_dir, 'mask_{}'.format(name)), (self.full_pred * 255).astype(np.uint8))
class CarvanaEval(FullImageEvaluator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def save(self, name, prefix=""):
name, ext = os.path.splitext(name)
cv2.imwrite(os.path.join('..', 'results', self.config.folder, "{}{}.png".format(prefix, name)), (self.full_pred[:1280, :1918] * 255).astype(np.uint8))
def eval_config(config_path):
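    # Build the Carvana dataset (test split here, since test is hard-coded to
    # True), load CV folds from CSV when evaluating on train or treat every
    # image as test data otherwise, and predict for all folds not skipped.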
test = True
config = get_config(config_path)
num_workers = 0 if os.name == 'nt' else 3
root = config.dataset_path
image_folder_name = 'train_hq' if not test else 'test_hq'
c_ds = CarvanaDataset(root, config.img_rows, config.img_cols, image_folder_name=image_folder_name, apply_clahe=config.use_clahe)
ds = H5LikeFileInterface(c_ds)
if not test:
f = 'f04a.csv' if 'f04a' in config.folder else 'fma.csv'
folds = get_csv_folds(os.path.join('..', f), os.listdir(os.path.join(root, image_folder_name)))
else:
folds = [([], list(range(len(c_ds)))) for i in range(5)]
keval = CarvanaEval(config, ds, folds, test=test, flips=flip.FLIP_LR, num_workers=num_workers, border=0)
keval.need_dice = True
skip_folds = [i for i in range(5) if config.fold is not None and i != int(config.fold)]
print('skipping folds: ', skip_folds)
keval.predict(skip_folds=skip_folds)
if __name__ == "__main__":
for config in os.listdir('../configs'):
eval_config(os.path.join('..', 'configs', config))
|
plugins/input-syslog/examples/kafka_comsumer.py | andrei-hanciu/open-nti | 229 | 12694764 | <gh_stars>100-1000
import argparse
import json
from kafka import KafkaConsumer, KafkaProducer
parser = argparse.ArgumentParser(description='user input')
parser.add_argument("--kafka", dest="kafka_addr",
help="Provide Kafka Addr : localhost:9092")
parser.add_argument("--topic", dest="topic", default='events',
help="Specify which topic to listen")
parser.add_argument("--event", dest="event", help="Filter a specific topic")
args = parser.parse_args()
consumer = KafkaConsumer(bootstrap_servers=args.kafka_addr,
auto_offset_reset='earliest')
consumer.subscribe([args.topic])
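# Each consumed record carries a JSON-encoded event; when --event is given,
# only matching events are printed, otherwise every raw message is echoed.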
for message in consumer:
## Decode JSON
msg = json.loads(message.value)
if args.event:
if msg['event'] and msg['event'] == args.event:
print (msg)
else:
        print(message)
|
PokerRL/eval/rl_br/_util.py | m0n0l0c0/PokerRL | 247 | 12694795 | <reponame>m0n0l0c0/PokerRL<filename>PokerRL/eval/rl_br/_util.py
from PokerRL.game._.rl_env.poker_types.DiscretizedPokerEnv import DiscretizedPokerEnv
from PokerRL.game._.rl_env.poker_types.LimitPokerEnv import LimitPokerEnv
from PokerRL.rl.rl_util import get_builder_from_str, get_env_cls_from_str
def get_env_builder_rlbr(t_prof):
env_bldr_cls = get_builder_from_str(t_prof.env_builder_cls_str)
return env_bldr_cls(env_cls=get_env_cls_from_str(t_prof.game_cls_str),
env_args=t_prof.module_args["rlbr"].get_rlbr_env_args(
agents_env_args=t_prof.module_args["env"]))
def reset_episode_multi_action_space(rlbr_env_wrapper, opponent_agent):
ret = rlbr_env_wrapper.reset()
opponent_agent.reset(deck_state_dict=rlbr_env_wrapper.env.cards_state_dict())
return ret
def notify_agent_multi_action_space(action_int, rlbr_seat_id, rlbr_env_wrapper, opponent_agent):
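    # Limit envs forward the raw action; discretized envs translate action
    # indices >= 2 into the corresponding pot-fraction raise before notifying.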
_type = type(rlbr_env_wrapper.env)
if issubclass(_type, LimitPokerEnv):
opponent_agent.notify_of_action(p_id_acted=rlbr_seat_id, action_he_did=action_int)
elif issubclass(_type, DiscretizedPokerEnv):
if action_int >= 2:
raise_frac = rlbr_env_wrapper.env.bet_sizes_list_as_frac_of_pot[action_int - 2]
opponent_agent.notify_of_raise_frac_action(p_id_acted=rlbr_seat_id, frac=raise_frac)
else:
opponent_agent.notify_of_action(p_id_acted=rlbr_seat_id, action_he_did=action_int)
else:
raise ValueError(_type)
def step_from_opp_action(action_int, opponent, rlbr_env_wrapper):
_type = type(rlbr_env_wrapper.env)
if issubclass(_type, LimitPokerEnv):
return rlbr_env_wrapper.step(action=action_int)
elif issubclass(_type, DiscretizedPokerEnv):
if action_int >= 2:
raise_frac = opponent.env_bldr.env_args.bet_sizes_list_as_frac_of_pot[action_int - 2]
return rlbr_env_wrapper.step_raise_pot_frac(pot_frac=raise_frac)
else:
return rlbr_env_wrapper.step(action=action_int)
else:
raise ValueError(_type)
|
flaskblog/md/extensions.py | cnxue/Flog | 202 | 12694803 | import re
from marko import block, HTMLRenderer
class PhotoSet(block.BlockElement):
pattern = re.compile(
r' {,3}(!\[([^\[\]\n]+)?\]\(([^)\n]+)\))'
r'( {,3}(!\[([^\[\]\n]+)?\]\(([^)\n]+)\)))*[^\n\S]*$\n?', re.M
)
inline_children = True
def __init__(self, match):
self.children = match.group()
@classmethod
def match(cls, source):
return source.expect_re(cls.pattern)
@classmethod
def parse(cls, source):
rv = source.match
source.consume()
return rv
class FlogRendererMixin:
def render_image(self, element):
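        # Wrap the rendered <img> in a <figure>, expose the original URL via a
        # data-original attribute, and emit any title text as a <figcaption>.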
result = super().render_image(element)
result = result.replace(
'<img', '<img data-original="{}"'.format(self.escape_url(element.dest))
)
caption = (
'<figcaption>{}</figcaption>'.format(element.title)
if element.title else ''
)
return '<figure>{}{}</figure>'.format(result, caption)
def render_photo_set(self, element):
return '<div class="photo-set d-lg-flex">\n{}</div>\n'.format(
self.render_children(element)
)
def render_fenced_code(self, element):
rv = [
'<div class="block-code">\n'
'<div class="code-head clearfix">{}<span class="copy-code"'
' title="Copy code">{}</span></div>\n'
.format(element.extra, element.lang.upper()),
super().render_fenced_code(element),
'</div>\n'
]
return ''.join(rv)
def _open_heading_group(self):
return '<div class="list-group">\n'
def _close_heading_group(self):
return '</div>\n'
def _render_toc_item(self, slug, text):
return '<a class="list-group-item" href="#{}">{}</a>\n'.format(
slug, re.sub(r"<.+?>", "", text)
)
def render_toc(self, maxlevel=3):
return super().render_toc(maxlevel).replace(
'<div class="list-group">',
'<div class="list-group" id="table-of-content">', 1
)
def render_html_block(self, element):
# Disable tag filter, use the original render function
return HTMLRenderer.render_html_block(self, element)
class StrictHTMLRendererMixin:
def render_html_block(self, element):
        # Apply the GFM tag filter so disallowed raw HTML tags are escaped
return self.tagfilter.sub(r'<\1', element.children)
class Flog:
elements = [PhotoSet]
renderer_mixins = [FlogRendererMixin]
class StrictFlog:
renderer_mixins = [StrictHTMLRendererMixin]
|
tests/cli/test_heartbeat.py | nicolasiltis/prefect | 8,633 | 12694810 | <filename>tests/cli/test_heartbeat.py
import pytest
from click.testing import CliRunner
from unittest.mock import MagicMock
from prefect.cli.heartbeat import heartbeat, flow_run
from prefect.utilities.configuration import set_temporary_config
def test_heartbeat_init():
runner = CliRunner()
result = runner.invoke(heartbeat)
assert result.exit_code == 0
assert "Send heartbeats back to the Prefect API." in result.output
def test_heartbeat_help():
runner = CliRunner()
result = runner.invoke(heartbeat, ["--help"])
assert result.exit_code == 0
assert "Send heartbeats back to the Prefect API." in result.output
def test_heartbeat_flow_run(patch_post, cloud_api):
patch_post(dict(data=dict(update_flow_run_heartbeat="success")))
with set_temporary_config({"cloud.heartbeat_interval": 0.1}):
runner = CliRunner()
result = runner.invoke(heartbeat, ["flow-run", "--id", "id", "--num", "1"])
assert result.exit_code == 0
def test_heartbeat_multiple_flow_run_heartbeats(patch_post, cloud_api):
post = patch_post(dict(data=dict(update_flow_run_heartbeat="success")))
with set_temporary_config({"cloud.heartbeat_interval": 0.1}):
runner = CliRunner()
result = runner.invoke(heartbeat, ["flow-run", "--id", "id", "--num", "2"])
assert result.exit_code == 0
assert post.called
assert post.call_count == 2
def test_heartbeat_is_robust_to_exceptions(cloud_api, monkeypatch, caplog):
Client = MagicMock()
monkeypatch.setattr("prefect.cli.heartbeat.Client", Client)
monkeypatch.setattr("prefect.cli.heartbeat.time.sleep", MagicMock())
Client().update_flow_run_heartbeat.side_effect = ValueError("Foo")
runner = CliRunner()
result = runner.invoke(heartbeat, ["flow-run", "--id", "id", "--num", "2"])
assert result.exit_code == 0
# Called twice despite raising errors
assert Client().update_flow_run_heartbeat.call_count == 2
assert (
f"Failed to send heartbeat with exception: {ValueError('Foo')!r}" in caplog.text
)
assert "Traceback" in caplog.text
def test_heartbeat_does_not_ignore_base_exceptions(cloud_api, monkeypatch, caplog):
Client = MagicMock()
monkeypatch.setattr("prefect.cli.heartbeat.Client", Client)
monkeypatch.setattr("prefect.cli.heartbeat.time.sleep", MagicMock())
Client().update_flow_run_heartbeat.side_effect = KeyboardInterrupt()
runner = CliRunner()
result = runner.invoke(heartbeat, ["flow-run", "--id", "id", "--num", "2"])
assert result.exit_code == 1
# Called _once_, error caused immediate exit
assert Client().update_flow_run_heartbeat.call_count == 1
assert (
"Heartbeat process encountered terminal exception: KeyboardInterrupt()"
in caplog.text
)
assert "Traceback" in caplog.text
@pytest.mark.parametrize("terminal_exc", [True, False])
def test_heartbeat_exceptions_are_logged_to_cloud(cloud_api, monkeypatch, terminal_exc):
Client = MagicMock()
LOG_MANAGER = MagicMock()
monkeypatch.setattr("prefect.cli.heartbeat.Client", Client)
monkeypatch.setattr("prefect.utilities.logging.LOG_MANAGER", LOG_MANAGER)
monkeypatch.setattr("prefect.cli.heartbeat.time.sleep", MagicMock())
Client().update_flow_run_heartbeat.side_effect = (
KeyboardInterrupt() if terminal_exc else ValueError("Foo")
)
runner = CliRunner()
runner.invoke(heartbeat, ["flow-run", "--id", "id", "--num", "2"])
# The exception was logged both times
log = LOG_MANAGER.enqueue.call_args[0][0]
assert log["flow_run_id"] == "id"
assert log["name"] == "prefect.subprocess_heartbeat"
assert log["level"] == "ERROR"
if terminal_exc:
assert (
"Heartbeat process encountered terminal exception: KeyboardInterrupt()"
in log["message"]
)
else:
assert (
f"Failed to send heartbeat with exception: {ValueError('Foo')!r}"
in log["message"]
)
assert "Traceback" in log["message"]
|
search/tests/unit/proxy/__init__.py | defendercrypt/amundsen | 2,072 | 12694812 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
|
social_auth/backends/contrib/flickr.py | merutak/django-social-auth | 863 | 12694816 | <reponame>merutak/django-social-auth
from social.backends.flickr import FlickrOAuth as FlickrBackend
|
modules/nltk_contrib/classifier/basicimports.py | h4ck3rm1k3/NLP-project | 123 | 12694863 | <filename>modules/nltk_contrib/classifier/basicimports.py
from nltk_contrib.classifier.attribute import Attribute, Attributes
from nltk_contrib.classifier.confusionmatrix import ConfusionMatrix
from nltk_contrib.classifier.decisionstump import DecisionStump
from nltk_contrib.classifier.decisiontree import DecisionTree
from nltk_contrib.classifier.featureselect import FeatureSelection
from nltk_contrib.classifier.discretise import Discretiser
from nltk_contrib.classifier.instances import TrainingInstances, TestInstances, GoldInstances
from nltk_contrib.classifier.instance import TrainingInstance, TestInstance, GoldInstance
from nltk_contrib.classifier.knn import IB1
from nltk_contrib.classifier.naivebayes import NaiveBayes
from nltk_contrib.classifier.oner import OneR
from nltk_contrib.classifier.zeror import ZeroR
from nltk_contrib.classifier.format import c45
|
sandbox/grist/test_match_counter.py | nataliemisasi/grist-core | 2,667 | 12694885 | import random
import string
import timeit
import unittest
from collections import Hashable
import six
from six.moves import xrange
import match_counter
from testutil import repeat_until_passes
# Here's an alternative implementation. Unlike the simple one, it never constructs a new data
# structure, or modifies dictionary keys while iterating, but it is still slower.
class MatchCounterOther(object):
def __init__(self, _sample):
self.sample_counts = {v: 0 for v in _sample}
def count_unique(self, iterable):
for v in iterable:
try:
n = self.sample_counts.get(v)
if n is not None:
self.sample_counts[v] = n + 1
except TypeError:
pass
matches = 0
for v, n in six.iteritems(self.sample_counts):
if n > 0:
matches += 1
self.sample_counts[v] = 0
return matches
# If not for dealing with unhashable errors, `.intersection(iterable)` would be by far the
# fastest. But with the extra iteration and especially checking for Hashable, it's super slow.
class MatchCounterIntersection(object):
def __init__(self, _sample):
self.sample = set(_sample)
def count_unique(self, iterable):
return len(self.sample.intersection(v for v in iterable if isinstance(v, Hashable)))
# This implementation doesn't measure the intersection, but it's interesting to compare its
# timings: this is still slower! Presumably because set intersection is native code that's more
# optimized than checking membership many times from Python.
class MatchCounterSimple(object):
def __init__(self, _sample):
self.sample = set(_sample)
def count_all(self, iterable):
return sum(1 for r in iterable if present(r, self.sample))
# This is much faster than using `isinstance(v, Hashable) and v in value_set`
def present(v, value_set):
try:
return v in value_set
except TypeError:
return False
# Set up a predictable random number generator.
r = random.Random(17)
def random_string():
length = r.randint(10,20)
return ''.join(r.choice(string.ascii_letters) for x in xrange(length))
def sample_with_repl(population, n):
return [r.choice(population) for x in xrange(n)]
# Here's some sample generated data.
sample = [random_string() for x in xrange(200)]
data1 = sample_with_repl([random_string() for x in xrange(20)] + r.sample(sample, 5), 1000)
data2 = sample_with_repl([random_string() for x in xrange(100)] + r.sample(sample, 15), 500)
# Include an example with an unhashable value, to ensure all implementation can handle it.
data3 = sample_with_repl([random_string() for x in xrange(10)] + sample, 2000) + [[1,2,3]]
class TestMatchCounter(unittest.TestCase):
def test_match_counter(self):
m = match_counter.MatchCounter(sample)
self.assertEqual(m.count_unique(data1), 5)
self.assertEqual(m.count_unique(data2), 15)
self.assertEqual(m.count_unique(data3), 200)
m = MatchCounterOther(sample)
self.assertEqual(m.count_unique(data1), 5)
self.assertEqual(m.count_unique(data2), 15)
self.assertEqual(m.count_unique(data3), 200)
# Do it again to ensure that we clear out state between counting.
self.assertEqual(m.count_unique(data1), 5)
self.assertEqual(m.count_unique(data2), 15)
self.assertEqual(m.count_unique(data3), 200)
m = MatchCounterIntersection(sample)
self.assertEqual(m.count_unique(data1), 5)
self.assertEqual(m.count_unique(data2), 15)
self.assertEqual(m.count_unique(data3), 200)
m = MatchCounterSimple(sample)
self.assertGreaterEqual(m.count_all(data1), 5)
self.assertGreaterEqual(m.count_all(data2), 15)
self.assertGreaterEqual(m.count_all(data3), 200)
@repeat_until_passes(3)
def test_timing(self):
setup='''
import match_counter
import test_match_counter as t
m1 = match_counter.MatchCounter(t.sample)
m2 = t.MatchCounterOther(t.sample)
m3 = t.MatchCounterSimple(t.sample)
m4 = t.MatchCounterIntersection(t.sample)
'''
N = 100
t1 = min(timeit.repeat(stmt='m1.count_unique(t.data1)', setup=setup, number=N, repeat=3)) / N
t2 = min(timeit.repeat(stmt='m2.count_unique(t.data1)', setup=setup, number=N, repeat=3)) / N
t3 = min(timeit.repeat(stmt='m3.count_all(t.data1)', setup=setup, number=N, repeat=3)) / N
t4 = min(timeit.repeat(stmt='m4.count_unique(t.data1)', setup=setup, number=N, repeat=3)) / N
#print "Timings/iter data1: %.3fus %.3fus %.3fus %.3fus" % (t1 * 1e6, t2 * 1e6, t3*1e6, t4*1e6)
self.assertLess(t1, t2)
self.assertLess(t1, t3)
self.assertLess(t1, t4)
t1 = min(timeit.repeat(stmt='m1.count_unique(t.data2)', setup=setup, number=N, repeat=3)) / N
t2 = min(timeit.repeat(stmt='m2.count_unique(t.data2)', setup=setup, number=N, repeat=3)) / N
t3 = min(timeit.repeat(stmt='m3.count_all(t.data2)', setup=setup, number=N, repeat=3)) / N
t4 = min(timeit.repeat(stmt='m4.count_unique(t.data2)', setup=setup, number=N, repeat=3)) / N
#print "Timings/iter data2: %.3fus %.3fus %.3fus %.3fus" % (t1 * 1e6, t2 * 1e6, t3*1e6, t4*1e6)
self.assertLess(t1, t2)
self.assertLess(t1, t3)
self.assertLess(t1, t4)
t1 = min(timeit.repeat(stmt='m1.count_unique(t.data3)', setup=setup, number=N, repeat=3)) / N
t2 = min(timeit.repeat(stmt='m2.count_unique(t.data3)', setup=setup, number=N, repeat=3)) / N
t3 = min(timeit.repeat(stmt='m3.count_all(t.data3)', setup=setup, number=N, repeat=3)) / N
t4 = min(timeit.repeat(stmt='m4.count_unique(t.data3)', setup=setup, number=N, repeat=3)) / N
#print "Timings/iter data3: %.3fus %.3fus %.3fus %.3fus" % (t1 * 1e6, t2 * 1e6, t3*1e6, t4*1e6)
self.assertLess(t1, t2)
#self.assertLess(t1, t3) # This fails on occasion, but it's a fairly pointless check.
self.assertLess(t1, t4)
if __name__ == "__main__":
unittest.main()
|
docqa/scripts/show_parameters.py | Willyoung2017/doc-qa | 422 | 12694886 | import argparse
import tensorflow as tf
from docqa.model_dir import ModelDir
import numpy as np
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument("model")
args = parser.parse_args()
model_dir = ModelDir(args.model)
checkpoint = model_dir.get_best_weights()
print(checkpoint)
if checkpoint is None:
print("Show latest checkpoint")
checkpoint = model_dir.get_latest_checkpoint()
else:
print("Show best weights")
reader = tf.train.NewCheckpointReader(checkpoint)
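    # List every variable with its shape and add up the products of the shape
    # dimensions to report the total number of parameters in the checkpoint.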
param_map = reader.get_variable_to_shape_map()
total = 0
for k in sorted(param_map):
v = param_map[k]
print('%s: %s' % (k, str(v)))
total += np.prod(v)
print("%d total" % total)
if __name__ == "__main__":
main()
|
DS&Algo Programs in Python/prime.py | prathimacode-hub/HacktoberFest-2020 | 386 | 12694905 | <reponame>prathimacode-hub/HacktoberFest-2020<gh_stars>100-1000
n = int(input("Enter a number: "))
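# For each i in 1..n, count how many j in 1..n divide it evenly; a number
# with exactly two divisors (1 and itself) is prime and gets printed.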
for i in range(1, n+1):
count = 0
for j in range(1, n+1):
if i%j==0:
count= count+1
if count==2:
print(i)
|
lib/passlib/handlers/postgres.py | Rudi9719/booksearch-web | 674 | 12694921 | """passlib.handlers.postgres_md5 - MD5-based algorithm used by Postgres for pg_shadow table"""
#=============================================================================
# imports
#=============================================================================
# core
from hashlib import md5
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.utils import to_bytes
from passlib.utils.compat import b, bytes, str_to_uascii, unicode, u
import passlib.utils.handlers as uh
# local
__all__ = [
"postgres_md5",
]
#=============================================================================
# handler
#=============================================================================
class postgres_md5(uh.HasUserContext, uh.StaticHandler):
"""This class implements the Postgres MD5 Password hash, and follows the :ref:`password-hash-api`.
It does a single round of hashing, and relies on the username as the salt.
The :meth:`~passlib.ifc.PasswordHash.encrypt`, :meth:`~passlib.ifc.PasswordHash.genhash`, and :meth:`~passlib.ifc.PasswordHash.verify` methods all require the
following additional contextual keywords:
:type user: str
:param user: name of postgres user account this password is associated with.
"""
#===================================================================
# algorithm information
#===================================================================
name = "postgres_md5"
_hash_prefix = u("md5")
checksum_chars = uh.HEX_CHARS
checksum_size = 32
#===================================================================
# primary interface
#===================================================================
def _calc_checksum(self, secret):
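        # Postgres hashes the password concatenated with the user name; the
        # "md5" prefix is prepended separately via _hash_prefix.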
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
user = to_bytes(self.user, "utf-8", param="user")
return str_to_uascii(md5(secret + user).hexdigest())
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
|
waymo_open_dataset/latency/examples/2d_challenge/pytorch/wod_latency_submission/__init__.py | mirtaheri/waymo-open-dataset | 1,814 | 12694924 | <reponame>mirtaheri/waymo-open-dataset<filename>waymo_open_dataset/latency/examples/2d_challenge/pytorch/wod_latency_submission/__init__.py
# Copyright 2021 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to load and run a Faster R-CNN model."""
import numpy as np
import torch
import torchvision
model = None
DATA_FIELDS = ['FRONT_IMAGE']
def initialize_model():
global model
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()
def translate_label_to_wod(label):
"""Translate a single COCO class to its corresponding WOD class.
Note: Returns -1 if this COCO class has no corresponding class in WOD.
Args:
label: int COCO class label
Returns:
Int WOD class label, or -1.
"""
label_conversion_map = {
1: 2, # Person is ped
2: 4, # Bicycle is bicycle
3: 1, # Car is vehicle
4: 1, # Motorcycle is vehicle
6: 1, # Bus is vehicle
8: 1, # Truck is vehicle
13: 3, # Stop sign is sign
}
return label_conversion_map.get(label, -1)
# BEGIN-INTERNAL
# pylint: disable=invalid-name
# END-INTERNAL
def run_model(FRONT_IMAGE):
"""Run the model on the RGB image.
Args:
FRONT_IMAGE: H x W x 3 numpy ndarray.
Returns:
Dict from string to numpy ndarray.
"""
# Convert the image to a PyTorch-friendly format by casting it from uint8 to
# float32 (and dividing by 255 to take it from [0, 255] to [0, 1]) and
# transposing it from H x W x C to C x H x W.
transposed_float_img = np.transpose(
FRONT_IMAGE.astype(np.float32) / 255.0, [2, 0, 1])
outputs = model([torch.from_numpy(transposed_float_img)])
  corners = outputs[0]['boxes'].detach().numpy()
  scores = outputs[0]['scores'].detach().numpy()
  coco_classes = outputs[0]['labels'].detach().numpy()
# Convert the classes from COCO classes to WOD classes, and only keep
# detections that belong to a WOD class.
wod_classes = np.vectorize(translate_label_to_wod)(coco_classes)
corners = corners[wod_classes > 0]
scores = scores[wod_classes > 0]
classes = wod_classes[wod_classes > 0]
# Note: Torchvision's pretrained models returns boxes in the format
# (ymin, xmin, ymax, xmax). Thus, this function needs to convert them to the
# format expected by WOD, namely (center_x, center_y, width, height).
boxes = np.zeros_like(corners)
boxes[:, 0] = (corners[:, 3] + corners[:, 1]) / 2.0
boxes[:, 1] = (corners[:, 2] + corners[:, 0]) / 2.0
boxes[:, 2] = (corners[:, 3] - corners[:, 1])
boxes[:, 3] = (corners[:, 2] - corners[:, 0])
return {
'boxes': boxes,
'scores': scores,
'classes': classes,
}
# BEGIN-INTERNAL
# pylint: disable=invalid-name
# END-INTERNAL
|
neutron/common/coordination.py | congnt95/neutron | 1,080 | 12694948 | <gh_stars>1000+
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Coordination and locking utilities."""
import inspect
import decorator
from oslo_concurrency import lockutils
from oslo_log import log
from oslo_utils import timeutils
LOG = log.getLogger(__name__)
def synchronized(lock_name):
"""Synchronization decorator.
:param str lock_name: Lock name.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one process will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
Lock name can be formatted using Python format string syntax::
@synchronized('{f_name}-{resource.id}-{snap[name]}')
def foo(self, resource, snap):
...
Available field names are: decorated function parameters and
`f_name` as a decorated function name.
"""
@decorator.decorator
def _synchronized(f, *a, **k):
sig = inspect.signature(f).bind(*a, **k)
sig.apply_defaults()
call_args = sig.arguments
call_args['f_name'] = f.__name__
lock_format_name = lock_name.format(**call_args)
t1 = timeutils.now()
t2 = None
try:
with lockutils.lock(lock_format_name):
t2 = timeutils.now()
LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: '
'waited %(wait_secs)0.3fs',
{'name': lock_format_name,
'function': f.__name__,
'wait_secs': (t2 - t1)})
return f(*a, **k)
finally:
t3 = timeutils.now()
if t2 is None:
held_secs = "N/A"
else:
held_secs = "%0.3fs" % (t3 - t2)
LOG.debug('Lock "%(name)s" released by "%(function)s" :: held '
'%(held_secs)s',
{'name': lock_format_name,
'function': f.__name__,
'held_secs': held_secs})
return _synchronized
|
allennlp/data/dataset_readers/multitask.py | MSLars/allennlp | 11,433 | 12694962 | from os import PathLike
from typing import Dict, Iterator, Union, Optional
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.dataset_reader import (
DatasetReader,
WorkerInfo,
DatasetReaderInput,
)
@DatasetReader.register("multitask")
class MultiTaskDatasetReader(DatasetReader):
"""
This `DatasetReader` simply collects a dictionary of other `DatasetReaders`. It is designed for
a different class (the `MultiTaskDataLoader`) to actually read from each of the underlying
dataset readers, and so this really is just a glorified dictionary that we can construct as a
`DatasetReader`. We throw an error if you try to actually call `read()`, because you should be
doing that differently.
Registered as a `DatasetReader` with name "multitask".
# Parameters
readers : `Dict[str, DatasetReader]`
A mapping from dataset name to `DatasetReader` objects for reading that dataset. You can
use whatever names you want for the datasets, but they have to match the keys you use for
data files and in other places in the `MultiTaskDataLoader` and `MultiTaskScheduler`.
"""
def __init__(self, readers: Dict[str, DatasetReader]) -> None:
self.readers = {
task: _MultitaskDatasetReaderShim(reader, task) for task, reader in readers.items()
}
def read( # type: ignore
self,
file_paths: Union[PathLike, str, Dict[str, Union[PathLike, str]]],
*,
force_task: Optional[str] = None
) -> Union[Iterator[Instance], Dict[str, Iterator[Instance]]]:
if force_task is None:
raise RuntimeError("This class is not designed to be called like this.")
return self.readers[force_task].read(file_paths)
@DatasetReader.register("multitask_shim")
class _MultitaskDatasetReaderShim(DatasetReader):
"""This dataset reader wraps another dataset reader and adds the name of the "task" into
each instance as a metadata field. You should not have to use this yourself."""
def __init__(self, inner: DatasetReader, head: str, **kwargs):
super().__init__(**kwargs)
self.inner = inner
self.head = head
def _set_worker_info(self, info: Optional[WorkerInfo]) -> None:
"""
Should only be used internally.
"""
super()._set_worker_info(info)
self.inner._set_worker_info(info)
def read(self, file_path: DatasetReaderInput) -> Iterator[Instance]:
from allennlp.data.fields import MetadataField
for instance in self.inner.read(file_path):
instance.add_field("task", MetadataField(self.head))
yield instance
def text_to_instance(self, *inputs) -> Instance:
from allennlp.data.fields import MetadataField
instance = self.inner.text_to_instance(*inputs)
instance.add_field("task", MetadataField(self.head))
return instance
def apply_token_indexers(self, instance: Instance) -> None:
self.inner.apply_token_indexers(instance)
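if __name__ == "__main__":
    # Minimal usage sketch, assuming AllenNLP's TextClassificationJsonReader is
    # available; the task names and the file name below are hypothetical.
    from allennlp.data.dataset_readers import TextClassificationJsonReader

    multi_reader = MultiTaskDatasetReader(
        readers={
            "sentiment": TextClassificationJsonReader(),
            "topic": TextClassificationJsonReader(),
        }
    )
    # read() must be forced onto a single task head, otherwise it raises RuntimeError.
    for instance in multi_reader.read("sentiment_train.jsonl", force_task="sentiment"):
        print(instance["task"].metadata)  # -> "sentiment"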
|
biggan_discovery/there_and_back.py | andreasjansson/OroJaR | 199 | 12694973 | <gh_stars>100-1000
"""
Functions to smooth-out interpolations performed in Z-space. These are based on
<NAME>'s corresponding functions in manim. These are only used for
the purpose of visualization; they do not affect training.
"""
import torch
def smooth(t):
error = torch.sigmoid(torch.tensor(-5.))
return torch.clamp(torch.sigmoid(10 * (t - 0.5) - error) / (1 - 2 * error), 0, 1)
def there_and_back(t):
new_t = torch.where(t < 0.5, 2 * t, 2 * (1 - t))
return smooth(new_t)
def mid_right_mid_left_mid(steps, round=False):
t = torch.linspace(0.0, 1.0, steps)
ltr = there_and_back(t)
left_to_mid_to_left = ltr / 2
mid_to_right_to_mid = left_to_mid_to_left + 0.5
mid_to_left = torch.flip(left_to_mid_to_left[:steps//2], (0,))
mid_to_left_to_mid = torch.cat([mid_to_left, torch.flip(mid_to_left, (0,))])
out = torch.flip(torch.cat([mid_to_right_to_mid, mid_to_left_to_mid]), (0,))
if round: # [0, steps-1]
out = out.mul(steps - 1).round().long()
else: # [-1, 1]
out = out.add(-0.5).mul(2)
return out
def left_to_right(steps, round=False):
t = torch.linspace(0.0, 1.0, steps)
out = there_and_back(t)
if round:
        out = out.mul(steps - 1).round().long()
else:
out.add_(-0.5).mul_(2)
return out
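if __name__ == "__main__":
    # Quick sketch of both sweep helpers: rounded frame indices for an 8-step
    # visualization, and smoothed weights in [-1, 1].
    print(mid_right_mid_left_mid(8, round=True))
    print(left_to_right(8))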
|
desktop/core/ext-py/Django-1.11/tests/messages_tests/test_middleware.py | kokosing/hue | 5,079 | 12695000 | import unittest
from django import http
from django.contrib.messages.middleware import MessageMiddleware
class MiddlewareTests(unittest.TestCase):
def setUp(self):
self.middleware = MessageMiddleware()
def test_response_without_messages(self):
"""
MessageMiddleware is tolerant of messages not existing on request.
"""
request = http.HttpRequest()
response = http.HttpResponse()
self.middleware.process_response(request, response)
|
integration-tests/r_integration_test/deploy_query_test_model.py | DNCoelho/clipper | 1,403 | 12695010 | <filename>integration-tests/r_integration_test/deploy_query_test_model.py<gh_stars>1000+
from __future__ import absolute_import, division, print_function
import sys
import os
import time
import requests
import json
import logging
import numpy as np
if sys.version_info < (3, 0):
import subprocess32 as subprocess
else:
import subprocess
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/../../clipper_admin" % cur_dir))
sys.path.insert(0, os.path.abspath("%s/.." % cur_dir))
from test_utils import create_docker_connection, headers, BenchmarkException
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
APP_NAME = "rtest-app"
APP_DEFAULT_VALUE = "NONE"
APP_SLO = 1000000
INPUT_TYPE = "doubles"
MODEL_NAME = "rtest-model"
MODEL_VERSION = 1
MODEL_IMAGE_NAME = "default-cluster-rtest-model:1"
def create_application(clipper_conn):
clipper_conn.register_application(APP_NAME, INPUT_TYPE, APP_DEFAULT_VALUE,
APP_SLO)
time.sleep(1)
def deploy_and_link_model(clipper_conn):
subprocess.check_call(["Rscript", "build_test_model.R"])
clipper_conn.deploy_model(MODEL_NAME, MODEL_VERSION, INPUT_TYPE,
MODEL_IMAGE_NAME)
clipper_conn.link_model_to_app(app_name=APP_NAME, model_name=MODEL_NAME)
def send_requests(clipper_conn):
success = False
num_tries = 0
while not success and num_tries < 5:
time.sleep(30)
num_preds = 25
num_success = 0
addr = clipper_conn.get_query_addr()
logger.info("ADDR: {}".format(addr))
for i in range(num_preds):
response = requests.post(
"http://%s/%s/predict" % (addr, APP_NAME),
headers=headers,
data=json.dumps({
'input': list(np.random.random(30))
}))
result = response.json()
if response.status_code == requests.codes.ok and not result["default"]:
num_success += 1
else:
logger.warning(result)
if num_success < num_preds:
logger.error(
"Error: %d/%d predictions were default or unsuccessful" %
(num_preds - num_success, num_preds))
if num_success > num_preds / 2.0:
success = True
num_tries += 1
if not success:
raise BenchmarkException("Error querying R model")
if __name__ == "__main__":
try:
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=True)
time.sleep(10)
try:
logger.info("Running R model deployment test")
create_application(clipper_conn)
deploy_and_link_model(clipper_conn)
time.sleep(5)
send_requests(clipper_conn)
logger.info("R model deployment completed SUCCESSFULLY!")
except BenchmarkException as e:
logger.exception("BenchmarkException in R model deployment test")
create_docker_connection(cleanup=True, start_clipper=False)
sys.exit(1)
else:
create_docker_connection(cleanup=True, start_clipper=False)
except Exception as e:
logger.exception("Exception")
create_docker_connection(cleanup=True, start_clipper=False)
sys.exit(1)
|
bf3s/algorithms/fewshot/imagenet_lowshot.py | alisure-fork/BF3S | 130 | 12695031 | <filename>bf3s/algorithms/fewshot/imagenet_lowshot.py
import os
import numpy as np
import torch
from tqdm import tqdm
import bf3s.algorithms.fewshot.fewshot as fewshot
import bf3s.utils as utils
def compute_top1_and_top5_accuracy(scores, labels):
_, topk_labels = scores.topk(5, 1, True, True)
label_ind = labels.cpu().numpy()
topk_ind = topk_labels.cpu().numpy()
top1_correct = topk_ind[:, 0] == label_ind
top5_correct = np.sum(topk_ind == label_ind.reshape((-1, 1)), axis=1)
return top1_correct.astype(float), top5_correct.astype(float)
def softmax_with_novel_prior(scores, novel_inds, base_inds, prior_m):
scores = torch.exp(scores)
scores_novel = scores[:, novel_inds]
scores_base = scores[:, base_inds]
tol = 0.0000001
scores_novel *= prior_m / (
tol + torch.sum(scores_novel, dim=1, keepdim=True).expand_as(scores_novel)
)
scores_base *= (1.0 - prior_m) / (
tol + torch.sum(scores_base, dim=1, keepdim=True).expand_as(scores_base)
)
scores[:, novel_inds] = scores_novel
scores[:, base_inds] = scores_base
return scores
class ImageNetLowShot(fewshot.FewShot):
"""Routines for evaluating a few-shot model on the ImageNet-FS benchmark."""
def __init__(self, opt, _run=None, _log=None):
super().__init__(opt, _run, _log)
self.keep_best_model_metric_name = "top5_novel"
def preprocess_novel_training_data(self, training_data):
"""Preprocess the novel training data."""
images_train, labels_train, Kids, num_base, num_novel = training_data
self.num_base = num_base
self.num_novel = num_novel
# Insert an extra singleton dimension.
images_train = images_train.unsqueeze(dim=0)
labels_train = labels_train.unsqueeze(dim=0)
Kids = Kids.unsqueeze(dim=0)
self.tensors["images_train"].resize_(images_train.size()).copy_(images_train)
self.tensors["labels_train"].resize_(labels_train.size()).copy_(labels_train)
self.tensors["Kids"].resize_(Kids.size()).copy_(Kids)
labels_train = self.tensors["labels_train"]
labels_train_1hot_size = list(labels_train.size()) + [
num_novel,
]
dim = len(labels_train_1hot_size) - 1
labels_train = labels_train.unsqueeze(dim=labels_train.dim())
self.tensors["labels_train_1hot"].resize_(labels_train_1hot_size).fill_(0).scatter_(
dim, labels_train - num_base, 1
)
def add_novel_categories(self, nove_cat_training_data):
"""Add the training data of the novel categories to the model."""
feature_extractor = self.networks["feature_extractor"]
classifier = self.networks["classifier"]
feature_extractor.eval()
classifier.eval()
self.preprocess_novel_training_data(nove_cat_training_data)
images = self.tensors["images_train"].detach()
labels_train_1hot = self.tensors["labels_train_1hot"].detach()
Kids = self.tensors["Kids"].detach()
base_ids = None if (self.num_base == 0) else Kids[:, : self.num_base].contiguous()
with torch.no_grad():
# *******************************************************************
# ****************** EXTRACT FEATS FROM EXEMPLARS *******************
meta_batch_size = images.size(0)
images = utils.convert_from_5d_to_4d(images)
features_train = feature_extractor(images)
features_train = utils.add_dimension(features_train, meta_batch_size)
# *******************************************************************
# ****************** GET CLASSIFICATION WEIGHTS *********************
# The following routine returns the classification weight vectors of
# both the base and then novel categories. For the novel categories,
# the classification weight vectors are generated using the training
            # features for those novel categories.
clsWeights = classifier.get_classification_weights(
base_ids=base_ids,
features_train=features_train,
labels_train=labels_train_1hot,
)
# *******************************************************************
self.tensors["clsWeights"] = clsWeights.clone().detach()
def evaluate_model_on_test_images(
self, data_loader, base_classes, novel_classes, exp_id="", prior_m=0.8
):
"""Evaluate the model.
It is assumed that the user has already called the routine
add_novel_categories() before calling this function.
Args:
            data_loader: data loader that feeds test images and labels in order
                to evaluate the model.
base_classes: A list with the labels of the base categories that
will be used for evaluation.
novel_classes: A list with the labels of the novel categories that
will be used for evaluation.
exp_id: A string with the id of the experiment.
prior_m: A scalar in the range [0, 1.0] that represents the prior
for whether a test image comes from the novel or base classes.
"""
feature_extractor = self.networks["feature_extractor"]
classifier = self.networks["classifier"]
feature_extractor.eval()
classifier.eval()
clsWeights = self.tensors["clsWeights"]
both_classes = base_classes + novel_classes
        # Invalid classes are those that belong neither to the base classes
        # nor to the novel classes.
nKall = self.num_base + self.num_novel
not_valid_classes = list(set(range(nKall)).difference(set(both_classes)))
device = self.device
not_valid_classes_torch = torch.LongTensor(not_valid_classes).to(device)
base_classes_torch = torch.LongTensor(base_classes).to(device)
novel_classes_torch = torch.LongTensor(novel_classes).to(device)
top1, top1_novel, top1_base, top1_prior = None, None, None, None
top5, top5_novel, top5_base, top5_prior = None, None, None, None
all_labels = None
for idx, batch in enumerate(tqdm(data_loader(0))):
images_test, labels_test = batch
self.tensors["images_test"].resize_(images_test.size()).copy_(images_test)
self.tensors["labels_test"].resize_(labels_test.size()).copy_(labels_test)
images_test = self.tensors["images_test"].detach()
labels_test = self.tensors["labels_test"].detach()
num_test_examples = images_test.size(0)
with torch.no_grad():
features = feature_extractor(images_test)
features = features.view(1, num_test_examples, -1)
scores = classifier.apply_classification_weights(features, clsWeights)
scores = scores.view(num_test_examples, -1)
scores_prior = softmax_with_novel_prior(
scores.clone(), novel_classes_torch, base_classes_torch, prior_m
)
scores[:, not_valid_classes_torch] = -1000
top1_this, top5_this = compute_top1_and_top5_accuracy(scores, labels_test)
top1 = top1_this if top1 is None else np.concatenate((top1, top1_this))
top5 = top5_this if top5 is None else np.concatenate((top5, top5_this))
scores_prior[:, not_valid_classes_torch] = -1000
top1_this, top5_this = compute_top1_and_top5_accuracy(
scores_prior, labels_test
)
top1_prior = (
top1_this
if top1_prior is None
else np.concatenate((top1_prior, top1_this))
)
top5_prior = (
top5_this
if top5_prior is None
else np.concatenate((top5_prior, top5_this))
)
scores_novel = scores.clone()
scores_novel[:, base_classes_torch] = -1000
top1_this, top5_this = compute_top1_and_top5_accuracy(
scores_novel, labels_test
)
top1_novel = (
top1_this
if (top1_novel is None)
else np.concatenate((top1_novel, top1_this))
)
top5_novel = (
top5_this
if (top5_novel is None)
else np.concatenate((top5_novel, top5_this))
)
scores_base = scores.clone()
scores_base[:, novel_classes_torch] = -1000
top1_this, top5_this = compute_top1_and_top5_accuracy(scores_base, labels_test)
top1_base = (
top1_this
if (top1_base is None)
else np.concatenate((top1_base, top1_this))
)
top5_base = (
top5_this
if (top5_base is None)
else np.concatenate((top5_base, top5_this))
)
labels_test_np = labels_test.cpu().numpy()
all_labels = (
labels_test_np
if (all_labels is None)
else np.concatenate((all_labels, labels_test_np))
)
is_novel = np.in1d(all_labels, np.array(novel_classes))
is_base = np.in1d(all_labels, np.array(base_classes))
is_either = is_novel | is_base
top1_novel = 100 * np.mean(top1_novel[is_novel])
top1_novel_all = 100 * np.mean(top1[is_novel])
top1_base = 100 * np.mean(top1_base[is_base])
top1_base_all = 100 * np.mean(top1[is_base])
top1_all = 100 * np.mean(top1[is_either])
top1_all_prior = 100 * np.mean(top1_prior[is_either])
top5_novel = 100 * np.mean(top5_novel[is_novel])
top5_novel_all = 100 * np.mean(top5[is_novel])
top5_base = 100 * np.mean(top5_base[is_base])
top5_base_all = 100 * np.mean(top5[is_base])
top5_all = 100 * np.mean(top5[is_either])
top5_all_prior = 100 * np.mean(top5_prior[is_either])
self.logger.info(f"Experiment {exp_id}")
self.logger.info(
"==> Top 5 Accuracies: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(
top5_novel, top5_base, top5_all, top5_novel_all, top5_base_all, top5_all_prior,
)
)
self.logger.info(
"==> Top 1 Accuracies: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(
top1_novel, top1_base, top1_all, top1_novel_all, top1_base_all, top1_all_prior,
)
)
results_array = np.array(
[
top5_novel,
top5_base,
top5_all,
top5_novel_all,
top5_base_all,
top5_all_prior,
top1_novel,
top1_base,
top1_all,
top1_novel_all,
top1_base_all,
top1_all_prior,
]
).reshape(1, -1)
return results_array
def lowshot_avg_results(self, results_all, exp_id=""):
results_all = np.concatenate(results_all, axis=0)
num_eval_experiments = results_all.shape[0]
mu_results = results_all.mean(axis=0)
top5_novel = mu_results[0]
top5_base = mu_results[1]
top5_all = mu_results[2]
top5_novel_all = mu_results[3]
top5_base_all = mu_results[4]
top5_all_prior = mu_results[5]
top1_novel = mu_results[6]
top1_base = mu_results[7]
top1_all = mu_results[8]
top1_novel_all = mu_results[9]
top1_base_all = mu_results[10]
top1_all_prior = mu_results[11]
std_results = results_all.std(axis=0)
ci95_results = 1.96 * std_results / np.sqrt(results_all.shape[0])
top5_novel_ci95 = ci95_results[0]
top5_base_ci95 = ci95_results[1]
top5_all_ci95 = ci95_results[2]
top5_novel_all_ci95 = ci95_results[3]
top5_base_all_ci95 = ci95_results[4]
top5_all_prior_ci95 = ci95_results[5]
top1_novel_ci95 = ci95_results[6]
top1_base_ci95 = ci95_results[7]
top1_all_ci95 = ci95_results[8]
top1_novel_all_ci95 = ci95_results[9]
top1_base_all_ci95 = ci95_results[10]
top1_all_prior_ci95 = ci95_results[11]
self.logger.info("----------------------------------------------------------------")
self.logger.info(f"Average results of {num_eval_experiments} experiments: {exp_id}")
self.logger.info(
"==> Top 5 Accuracies: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(
top5_novel, top5_base, top5_all, top5_novel_all, top5_base_all, top5_all_prior,
)
)
self.logger.info(
"==> Top 5 conf. intervals: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(
top5_novel_ci95,
top5_base_ci95,
top5_all_ci95,
top5_novel_all_ci95,
top5_base_all_ci95,
top5_all_prior_ci95,
)
)
self.logger.info("----------------------------------------------------------------")
self.logger.info(
"==> Top 1 Accuracies: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(
top1_novel, top1_base, top1_all, top1_novel_all, top1_base_all, top1_all_prior,
)
)
self.logger.info(
"==> Top 1 conf. intervals: [Novel: {:3.2f} | Base: {:3.2f} | All {:3.2f} | Novel vs All {:3.2f} | Base vs All {:3.2f} | All prior {:3.2f}]".format(
top1_novel_ci95,
top1_base_ci95,
top1_all_ci95,
top1_novel_all_ci95,
top1_base_all_ci95,
top1_all_prior_ci95,
)
)
self.logger.info("----------------------------------------------------------------")
results = {
"top5_novel": round(top5_novel, 2),
"top5_base": round(top5_base, 2),
"top5_all": round(top5_all, 2),
"top5_novel_all": round(top5_novel_all, 2),
"top5_base_all": round(top5_base_all, 2),
"top5_all_prior": round(top5_all_prior, 2),
"top5_novel_ci95": round(top5_novel_ci95, 2),
"top5_base_ci95": round(top5_base_ci95, 2),
"top5_all_ci95": round(top5_all_ci95, 2),
"top5_novel_all_ci95": round(top5_novel_all_ci95, 2),
"top5_base_all_ci95": round(top5_base_all_ci95, 2),
"top5_all_prior_ci95": round(top5_all_prior_ci95, 2),
}
return results
def evaluate(self, dloader, num_eval_exp=20, prior=0.8, suffix=""):
self.logger.info("Evaluating: %s" % os.path.basename(self.exp_dir))
self.logger.info("Num exemplars: %d" % dloader.n_exemplars)
self.logger.info("Num evaluation experiments: %d" % num_eval_exp)
self.logger.info("Prior: %f" % prior)
results = []
        # Run num_eval_exp evaluation experiments (each
        # time sampling a different set of training images for the novel
# categories).
for exp_id in range(num_eval_exp):
# Sample training data for the novel categories from the training
# set of ImageNet.
nove_cat_data = dloader.sample_training_data_for_novel_categories(exp_id=exp_id)
# Feed the training data of the novel categories to the algorithm.
self.add_novel_categories(nove_cat_data)
# Evaluate on the validation images of ImageNet.
results_this = self.evaluate_model_on_test_images(
data_loader=dloader,
base_classes=dloader.base_category_label_indices(),
novel_classes=dloader.novel_category_label_indices(),
exp_id="Exp_id = " + str(exp_id),
prior_m=prior,
)
results.append(results_this)
# Print the average results.
self.logger.info("Evaluating: %s" % os.path.basename(self.exp_dir))
avg_results = self.lowshot_avg_results(results, exp_id="")
eval_stats = utils.DAverageMeter("eval", self._run)
eval_stats.update(avg_results)
eval_stats.log()
self.add_stats_to_tensorboard_writer(eval_stats.average(), "test_")
return eval_stats
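if __name__ == "__main__":
    # Tiny numeric sketch of softmax_with_novel_prior: with uniform raw scores, the
    # two "novel" columns share prior_m and the two "base" columns share 1 - prior_m.
    demo_scores = softmax_with_novel_prior(
        torch.zeros(1, 4), novel_inds=[0, 1], base_inds=[2, 3], prior_m=0.8
    )
    print(demo_scores)  # approximately tensor([[0.4, 0.4, 0.1, 0.1]])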
|
helpers.py | cubean/ImageSimilarityUsingCntk | 130 | 12695041 | # -*- coding: utf-8 -*-
import sys, os, importlib, pdb, random, datetime, collections, pickle, cv2, requests
import matplotlib.pyplot as plt, numpy as np, scipy.spatial.distance
from sklearn import svm, metrics, calibration
from PIL import Image, ExifTags
random.seed(0)
################################
# ImageInfo class and helpers
################################
class ImageInfo(object):
allFeatures = []
def __init__(self, fname, subdir, parent = None):
self.fname = fname
self.subdir = subdir
self.children = []
self.parent = parent
if parent:
self.parent = self.shallowCopy(parent)
def getFeat(self):
if self.allFeatures == []:
raise Exception("Need to set/load DNN features first using e.g. this line 'ImageInfo.allFeatures = loadFromPickle(featuresPath)'")
key = self.subdir + "/" + self.fname
feat = np.array(self.allFeatures[key], np.float32)
assert (len(feat) == 4096 or len(feat) == 2048 or len(feat) == 512 or len(feat) == 25088)
return feat
def getImg(self, rootDir):
imgPath = self.getImgPath(rootDir)
return imread(imgPath)
def getImgPath(self, rootDir):
return rootDir + self.subdir + "/" + self.fname
def addChild(self, node):
node.parent = self
self.children.append(node)
def isSameClassAsParent(self):
return self.subdir == self.parent.subdir
def shallowCopy(self, node):
return ImageInfo(node.fname, node.subdir, node.parent)
def display(self):
print("Parent: " + self.node2Str(self))
for childIndex,child in enumerate(self.children):
print(" Child {:4} : {}".format(childIndex, self.node2Str(child)))
def node2Str(self, node):
return("fname = {}, subdir={}".format(node.fname, node.subdir)) #, node.parent)
def getImgPaths(imgInfos, rootDir=""):
paths = set()
for imgInfo in imgInfos:
paths.add(rootDir + "/" + imgInfo.subdir + "/" + imgInfo.fname)
for child in imgInfo.children:
paths.add(rootDir + "/" + child.subdir + "/" + child.fname)
return paths
def getRandomImgInfo(imgFilenames, subdirToExclude = None):
subdirs = list(imgFilenames.keys())
subdir = getRandomListElement(subdirs)
while subdir == subdirToExclude:
subdir = getRandomListElement(subdirs)
imgFilename = getRandomListElement(imgFilenames[subdir])
return ImageInfo(imgFilename, subdir)
################################
# helper functions - svm
################################
def getImgPairsFeatures(imgInfos, metric, boL2Normalize):
feats = []
labels = []
for queryImgIndex, queryImgInfo in enumerate(imgInfos):
queryFeat = queryImgInfo.getFeat()
if boL2Normalize:
queryFeat /= np.linalg.norm(queryFeat, 2)
for refImgInfo in queryImgInfo.children:
refFeat = refImgInfo.getFeat()
if boL2Normalize:
refFeat /= np.linalg.norm(refFeat, 2)
# Evaluate difference between the two images
featDiff = queryFeat - refFeat
if metric.lower() == 'diff':
feat = featDiff
elif metric.lower() == 'l1':
feat = abs(featDiff)
elif metric.lower() == 'l2':
feat = featDiff ** 2
else:
raise Exception("Unknown metric: " + metric)
feats.append(np.float32(feat))
labels.append(int(refImgInfo.isSameClassAsParent()))
return feats, labels
def mineHardNegatives(learner, imgFilenames, nrAddPerIter, featureDifferenceMetric, boL2Normalize,
maxNrRounds, initialThreshold = 1):
hardNegatives = []
roundCounterHardNegFound = 0
hardNegThreshold = initialThreshold
# Hard negative mining by repeatedly selecting a pair of images and adding to the
# training set if they are misclassified by at least a certain threshold.
for roundCounter in range(maxNrRounds):
roundCounterHardNegFound += 1
if len(hardNegatives) >= nrAddPerIter:
break
# Reduce threshold if no hard negative found after 1000 rounds
if roundCounterHardNegFound > 1000:
hardNegThreshold /= 2.0
roundCounterHardNegFound = 0
print(" Hard negative mining sampling round {:6d}: found {:4d} number of hard negatives; reducing hard negative threshold to {:3.3f}.".format(
roundCounter, len(hardNegatives), hardNegThreshold))
        # Sample two images from different ground-truth classes
ImageInfo1 = getRandomImgInfo(imgFilenames)
ImageInfo2 = getRandomImgInfo(imgFilenames, ImageInfo1.subdir)
ImageInfo1.addChild(ImageInfo2)
# Evaluate svm
featCandidate, labelCandidate = getImgPairsFeatures([ImageInfo1], featureDifferenceMetric, boL2Normalize)
assert (len(labelCandidate) == 1 and labelCandidate[0] == 0 and ImageInfo1.subdir != ImageInfo2.subdir)
score = learner.decision_function(featCandidate)
# If confidence is sufficiently high then add to list of hard negatives
if score > hardNegThreshold:
hardNegatives.append(featCandidate[0])
roundCounterHardNegFound = 0
print(" Hard negatives found: {}, after {} sampling rounds".format(len(hardNegatives), roundCounter+1))
return hardNegatives
def getSampleWeights(labels, negPosRatio = 1):
indsNegatives = np.where(np.array(labels) == 0)[0]
indsPositives = np.where(np.array(labels) != 0)[0]
negWeight = float(negPosRatio) * len(indsPositives) / len(indsNegatives)
weights = np.array([1.0] * len(labels))
weights[indsNegatives] = negWeight
assert (abs(sum(weights[indsNegatives]) - negPosRatio * sum(weights[indsPositives])) < 10 ** -3)
return weights
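# For instance, getSampleWeights([1, 0, 0, 0]) returns [1.0, 1/3, 1/3, 1/3], so the
# three negatives together carry the same total weight as the single positive example.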
def plotScoreVsProbability(learner, feats_train, feats_test):
probsTest = learner.predict_proba(feats_test)[:, 1]
probsTrain = learner.predict_proba(feats_train)[:, 1]
scoresTest = learner.base_estimator.decision_function(feats_test)
scoresTrain = learner.base_estimator.decision_function(feats_train)
plt.scatter(scoresTrain, probsTrain, c='r', label = 'train')
plt.scatter(scoresTest, probsTest, c='b', label = 'test')
plt.ylim([-0.02, 1.02])
plt.xlabel('SVM score')
plt.ylabel('Probability')
plt.title('Calibrated SVM - training set (red), test set (blue)')
return plt
################################
# helper functions - general
################################
def getImagePairs(imgFilenames, maxQueryImgsPerSubdir, maxNegImgsPerQueryImg):
# Get sub-directories with at least two images in them
querySubdirs = [s for s in imgFilenames.keys() if len(imgFilenames[s]) > 1]
# Generate pos and neg pairs for each subdir
imgInfos = []
for querySubdir in querySubdirs:
queryFilenames = randomizeList(imgFilenames[querySubdir])
# Pick at most 'maxQueryImgsPerSubdir' query images at random
for queryFilename in queryFilenames[:maxQueryImgsPerSubdir]:
queryInfo = ImageInfo(queryFilename, querySubdir)
# Add one positive example at random
refFilename = getRandomListElement(list(set(queryFilenames) - set([queryFilename])))
queryInfo.children.append(ImageInfo(refFilename, querySubdir, queryInfo))
assert(refFilename != queryFilename)
# Add multiple negative examples at random
for _ in range(maxNegImgsPerQueryImg):
refSubdir = getRandomListElement(list(set(querySubdirs) - set([querySubdir])))
refFilename = getRandomListElement(imgFilenames[refSubdir])
queryInfo.children.append(ImageInfo(refFilename, refSubdir, queryInfo))
assert(refSubdir != querySubdir)
# Store
queryInfo.children = randomizeList(queryInfo.children)
imgInfos.append(queryInfo)
print("Generated image pairs for {} query images, each with 1 positive image pair and {} negative image pairs.".format(len(imgInfos), maxNegImgsPerQueryImg))
return imgInfos
def getImgLabelMap(imgFilenames, imgDir, lut = None):
table = []
for label in imgFilenames.keys():
for imgFilename in imgFilenames[label]:
imgPath = imgDir + "/" + str(label) + "/" + imgFilename
if lut != None:
table.append((imgPath, lut[label]))
else:
table.append((imgPath, label))
return table
def balanceDatasetUsingDuplicates(data):
duplicates = []
counts = collections.Counter(getColumn(data,1))
print("Before balancing of training set:")
for item in counts.items():
        print("  Class {:3}: {:5} examples".format(*item))
# Get duplicates to balance dataset
targetCount = max(getColumn(counts.items(), 1))
while min(getColumn(counts.items(),1)) < targetCount:
for imgPath, label in data:
if counts[label] < targetCount:
duplicates.append((imgPath, label))
counts[label] += 1
# Add duplicates to original dataset
print("After balancing: all classes now have {} images; added {} duplicates to the {} original images.".format(targetCount, len(duplicates), len(data)))
data += duplicates
counts = collections.Counter(getColumn(data,1))
assert(min(counts.values()) == max(counts.values()) == targetCount)
return data
def printFeatLabelInfo(title, feats, labels, preString = " "):
print(title)
print(preString + "Number of examples: {}".format(len(feats)))
print(preString + "Number of positive examples: {}".format(sum(np.array(labels) == 1)))
print(preString + "Number of negative examples: {}".format(sum(np.array(labels) == 0)))
print(preString + "Dimension of each example: {}".format(len(feats[0])))
def sklearnAccuracy(learner, feats, gtLabels):
estimatedLabels = learner.predict(feats)
confusionMatrix = metrics.confusion_matrix(gtLabels, estimatedLabels)
return accsConfusionMatrix(confusionMatrix)
####################################
# Subset of helper library
# used in image similarity tutorial
####################################
# Typical meaning of variable names -- Computer Vision:
# pt = 2D point (column,row)
# img = image
# width,height (or w/h) = image dimensions
# bbox = bbox object (stores: left, top,right,bottom co-ordinates)
# rect = rectangle (order: left, top, right, bottom)
# angle = rotation angle in degree
# scale = image up/downscaling factor
# Typical meaning of variable names -- general:
# lines,strings = list of strings
# line,string = single string
# xmlString = string with xml tags
# table = 2D row/column matrix implemented using a list of lists
# row,list1D = single row in a table, i.e. single 1D-list
# rowItem = single item in a row
# list1D = list of items, not necessarily strings
# item = single item of a list1D
# slotValue = e.g. "terminator" in: play <movie> terminator </movie>
# slotTag = e.g. "<movie>" or "</movie>" in: play <movie> terminator </movie>
# slotName = e.g. "movie" in: play <movie> terminator </movie>
# slot = e.g. "<movie> terminator </movie>" in: play <movie> terminator </movie>
def readFile(inputFile):
# Reading as binary, to avoid problems with end-of-text characters.
# Note that readlines() does not remove the line ending characters
with open(inputFile,'rb') as f:
lines = f.readlines()
        return [removeLineEndCharacters(s.decode('utf8')) for s in lines]
def writeFile(outputFile, lines, header=None):
with open(outputFile,'w') as f:
if header != None:
f.write("%s\n" % header)
for line in lines:
f.write("%s\n" % line)
def writeBinaryFile(outputFile, data):
with open(outputFile,'wb') as f:
bytes = f.write(data)
return bytes
def readTable(inputFile, delimiter='\t'):
lines = readFile(inputFile);
return splitStrings(lines, delimiter)
def writeTable(outputFile, table, header=None):
lines = tableToList1D(table)
writeFile(outputFile, lines, header)
def loadFromPickle(inputFile):
with open(inputFile, 'rb') as filePointer:
data = pickle.load(filePointer)
return data
def saveToPickle(outputFile, data):
p = pickle.Pickler(open(outputFile,"wb"))
p.fast = True
p.dump(data)
def makeDirectory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def getFilesInDirectory(directory, postfix=""):
if not os.path.exists(directory):
return []
fileNames = [s for s in os.listdir(directory) if not os.path.isdir(directory + "/" + s)]
if not postfix or postfix == "":
return fileNames
else:
return [s for s in fileNames if s.lower().endswith(postfix)]
def getDirectoriesInDirectory(directory):
return [s for s in os.listdir(directory) if os.path.isdir(directory + "/" + s)]
def downloadFromUrl(url, boVerbose = True):
data = []
url = url.strip()
    r = None
    try:
        r = requests.get(url, timeout = 1)
        data = r.content
    except:
        if boVerbose:
            print('Error downloading url {0}'.format(url))
    if boVerbose and r is not None and not data:  # got a reply but an empty body
        print('Error {} downloading url {}'.format(r.status_code, url))
    return data
def removeLineEndCharacters(line):
if line.endswith('\r\n'):
return line[:-2]
elif line.endswith('\n'):
return line[:-1]
else:
return line
def splitString(string, delimiter='\t', columnsToKeepIndices=None):
if string == None:
return None
items = string.split(delimiter)
if columnsToKeepIndices != None:
items = getColumns([items], columnsToKeepIndices)
items = items[0]
return items
def splitStrings(strings, delimiter, columnsToKeepIndices=None):
table = [splitString(string, delimiter, columnsToKeepIndices) for string in strings]
return table
def getColumn(table, columnIndex):
column = []
for row in table:
column.append(row[columnIndex])
return column
def tableToList1D(table, delimiter='\t'):
return [delimiter.join([str(s) for s in row]) for row in table]
def ToIntegers(list1D):
return [int(float(x)) for x in list1D]
def mergeDictionaries(dict1, dict2):
tmp = dict1.copy()
tmp.update(dict2)
return tmp
def getRandomNumber(low, high):
randomNumber = random.randint(low,high)
return randomNumber
def randomizeList(listND, containsHeader=False):
if containsHeader:
header = listND[0]
listND = listND[1:]
random.shuffle(listND)
if containsHeader:
listND.insert(0, header)
return listND
def getRandomListElement(listND, containsHeader=False):
if containsHeader:
index = getRandomNumber(1, len(listND) - 1)
else:
index = getRandomNumber(0, len(listND) - 1)
return listND[index]
def accsConfusionMatrix(confMatrix):
perClassAccs = [(1.0 * row[rowIndex] / sum(row)) for rowIndex,row in enumerate(confMatrix)]
return perClassAccs
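# For instance, accsConfusionMatrix([[8, 2], [1, 9]]) gives the per-class accuracies
# [0.8, 0.9] (correct predictions on the diagonal divided by each row total).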
def computeVectorDistance(vec1, vec2, method, boL2Normalize, weights = [], bias = [], learner = []):
# Pre-processing
if boL2Normalize:
vec1 = vec1 / np.linalg.norm(vec1, 2)
vec2 = vec2 / np.linalg.norm(vec2, 2)
assert (len(vec1) == len(vec2))
# Distance computation
vecDiff = vec1 - vec2
method = method.lower()
if method == 'random':
dist = random.random()
elif method == 'l1':
dist = sum(abs(vecDiff))
elif method == 'l2':
dist = np.linalg.norm(vecDiff, 2)
elif method == 'normalizedl2':
a = vec1 / np.linalg.norm(vec1, 2)
b = vec2 / np.linalg.norm(vec2, 2)
dist = np.linalg.norm(a - b, 2)
elif method == "cosine":
dist = scipy.spatial.distance.cosine(vec1, vec2)
elif method == "correlation":
dist = scipy.spatial.distance.correlation(vec1, vec2)
elif method == "chisquared":
dist = chiSquared(vec1, vec2)
elif method == "normalizedchisquared":
a = vec1 / sum(vec1)
b = vec2 / sum(vec2)
dist = chiSquared(a, b)
elif method == "hamming":
dist = scipy.spatial.distance.hamming(vec1 > 0, vec2 > 0)
elif method == "mahalanobis":
        # assumes a covariance matrix is provided, e.g. using: sampleCovMat = np.cov(np.transpose(np.array(feats)))
dist = scipy.spatial.distance.mahalanobis(vec1, vec2, sampleCovMat)
elif method == 'weightedl1':
feat = np.float32(abs(vecDiff))
dist = np.dot(weights, feat) + bias
dist = -float(dist)
# assert(abs(dist - learnerL1.decision_function([feat])) < 0.000001)
elif method == 'weightedl2':
feat = (vecDiff) ** 2
dist = np.dot(weights, feat) + bias
dist = -float(dist)
elif method == 'weightedl2prob':
feat = (vecDiff) ** 2
dist = learner.predict_proba([feat])[0][1]
dist = float(dist)
# elif method == 'learnerscore':
# feat = (vecDiff) ** 2
# dist = learner.base_estimator.decision_function([feat])[0]
# dist = -float(dist)
else:
raise Exception("Distance method unknown: " + method)
assert (not np.isnan(dist))
return dist
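def exampleVectorDistances():
    # Small sketch comparing two made-up feature vectors under a few of the metrics
    # handled by computeVectorDistance; the vector values are arbitrary examples.
    vecA = np.array([1.0, 2.0, 3.0])
    vecB = np.array([1.5, 1.5, 3.5])
    for metricName in ('l1', 'l2', 'cosine', 'correlation'):
        print(metricName, computeVectorDistance(vecA, vecB, metricName, boL2Normalize=True))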
def rotationFromExifTag(imgPath):
TAGSinverted = {v: k for k, v in list(ExifTags.TAGS.items())}
orientationExifId = TAGSinverted['Orientation']
try:
imageExifTags = Image.open(imgPath)._getexif()
except:
imageExifTags = None
#rotate the image if orientation exif tag is present
rotation = 0
if imageExifTags != None and orientationExifId != None and orientationExifId in imageExifTags:
orientation = imageExifTags[orientationExifId]
if orientation == 1 or orientation == 0:
rotation = 0 #no need to do anything
elif orientation == 6:
rotation = -90
elif orientation == 8:
rotation = 90
else:
raise Exception("ERROR: orientation = " + str(orientation) + " not_supported!")
return rotation
def imread(imgPath, boThrowErrorIfExifRotationTagSet = True):
if not os.path.exists(imgPath):
raise Exception("ERROR: image path does not exist.")
rotation = rotationFromExifTag(imgPath)
if boThrowErrorIfExifRotationTagSet and rotation != 0:
print("Error: exif roation tag set, image needs to be rotated by %d degrees." % rotation)
img = cv2.imread(imgPath)
if img is None:
raise Exception("ERROR: cannot load image " + imgPath)
if rotation != 0:
        img = imrotate(img, -90).copy()  # To avoid occasional error: "TypeError: Layout of the output array img is incompatible with cv::Mat"
return img
def imWidth(input):
return imWidthHeight(input)[0]
def imHeight(input):
return imWidthHeight(input)[1]
def imWidthHeight(input):
if type(input) is str: #or type(input) is unicode:
width, height = Image.open(input).size # This does not load the full image
else:
width = input.shape[1]
height = input.shape[0]
return width,height
def imconvertCv2Numpy(img):
(b,g,r) = cv2.split(img)
return cv2.merge([r,g,b])
def imconvertCv2Pil(img):
cv2_im = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im)
return pil_im
def imconvertPil2Cv(pilImg):
return imconvertPil2Numpy(pilImg)[:, :, ::-1]
def imconvertPil2Numpy(pilImg):
rgb = pilImg.convert('RGB')
return np.array(rgb).copy()
def imresize(img, scale, interpolation = cv2.INTER_LINEAR):
return cv2.resize(img, (0,0), fx=scale, fy=scale, interpolation=interpolation)
def imresizeMaxDim(img, maxDim, boUpscale = False, interpolation = cv2.INTER_LINEAR):
scale = 1.0 * maxDim / max(img.shape[:2])
if scale < 1 or boUpscale:
img = imresize(img, scale, interpolation)
else:
scale = 1.0
return img, scale
def imresizeAndPad(img, width, height, padColor):
# resize image
imgWidth, imgHeight = imWidthHeight(img)
scale = min(float(width) / float(imgWidth), float(height) / float(imgHeight))
imgResized = imresize(img, scale) #, interpolation=cv2.INTER_NEAREST)
resizedWidth, resizedHeight = imWidthHeight(imgResized)
# pad image
top = int(max(0, np.round((height - resizedHeight) / 2)))
left = int(max(0, np.round((width - resizedWidth) / 2)))
bottom = height - top - resizedHeight
right = width - left - resizedWidth
return cv2.copyMakeBorder(imgResized, top, bottom, left, right,
cv2.BORDER_CONSTANT, value=padColor)
def imrotate(img, angle):
imgPil = imconvertCv2Pil(img)
imgPil = imgPil.rotate(angle, expand=True)
return imconvertPil2Cv(imgPil)
def imshow(img, waitDuration=0, maxDim = None, windowName = 'img'):
if isinstance(img, str): # Test if 'img' is a string
img = cv2.imread(img)
if maxDim is not None:
scaleVal = 1.0 * maxDim / max(img.shape[:2])
if scaleVal < 1:
img = imresize(img, scaleVal)
cv2.imshow(windowName, img)
cv2.waitKey(waitDuration) |
saas/aiops/api/aiops-server/ai_lib/time_series_prediction/algorithm/preprocess/DataPreprocessor.py | iuskye/SREWorks | 407 | 12695042 | <filename>saas/aiops/api/aiops-server/ai_lib/time_series_prediction/algorithm/preprocess/DataPreprocessor.py
# -*- coding: utf-8 -*-
# @author: 丛戎
# @target: preprocessing logic for the time-series prediction algorithm
import sys
from ai_lib.time_series_prediction.algorithm.preprocess.data_preprocess_utils import DataPreprocessUtils
PY2 = sys.version_info[0] == 2
class DataPreprocessor(object):
def data_preprocess(self, kpidata, colname, interval=None, period=None, process_method={}, user_config={}):
"""
:param kpidata:
:param colname:
        :param interval: spacing between data points, in seconds
        :param period: number of points within one period
        :param process_method: parameters for the preprocessing steps, e.g. {'fillna': {'startdate': 0, 'enddate': 0, 'interval': '5min', 'fillvalue': True}}
        :param user_config: advanced configuration provided by the user on the page
:return:
"""
preprocessor = DataPreprocessUtils()
        # First, drop duplicate timestamps
kpidata = kpidata.drop_duplicates(['ts'])
        # and sort by ts in ascending order
kpidata = kpidata.sort_values(by=['ts'], ascending=[1])
        # Fill in missing values
if 'fillna' in process_method.keys():
            # Derive the resampling frequency from the data granularity (interval in seconds)
interval_seconds = str(interval) + 'S'
fillvalue = process_method['fillna']['fillvalue']
if "fillna_withzero" in user_config['advanced_config_map'].keys():
fillvalue = False
kpidata = preprocessor.ts_fill_na(kpidata=kpidata, startdate=kpidata['ts'].min(),
enddate=kpidata['ts'].max(),
freq=interval_seconds,
fillvalue=fillvalue)
return kpidata |
example/geopage_nospatial/models.py | eduardocp/wagtail-geo-widget | 105 | 12695047 | from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import StreamFieldPanel
from wagtail.core import blocks
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtailgeowidget.edit_handlers import GeoPanel
from wagtailgeowidget.blocks import GeoBlock
class StandardPage(Page):
address = models.CharField(max_length=250, blank=True, null=True)
location = models.CharField(max_length=250, blank=True, null=True)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('address'),
GeoPanel('location', address_field='address'),
], _('Geo details')),
]
def get_context(self, request):
data = super(StandardPage, self).get_context(request)
return data
@cached_property
def point(self):
from wagtailgeowidget.helpers import geosgeometry_str_to_struct
return geosgeometry_str_to_struct(self.location)
@property
def lat(self):
return self.point['y']
@property
def lng(self):
return self.point['x']
class StreamPage(Page):
body = StreamField([
('map', GeoBlock()),
('map_struct', blocks.StructBlock([
('address', blocks.CharBlock(required=True)),
# ('map', GeoBlock(address_field='address')),
], icon='user'))
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
|
sample.py | sphh/python-colorlog | 745 | 12695058 | <gh_stars>100-1000
import logging
import colorlog
fmt = "{log_color}{levelname} {name}: {message}"
colorlog.basicConfig(level=logging.DEBUG, style="{", format=fmt, stream=None)
log = logging.getLogger()
log.warning("hello")
|
Python/Collection/CloneList.py | piovezan/SOpt | 148 | 12695071 | <reponame>piovezan/SOpt
current = [0, 1]
someList = []
while True:
for n in range(0, 2):
current[n] += 1
print(current)
    someList.append(current[:])  # makes a copy of the list here and stores a reference to the copy, not to the original
if current == [2, 3]:
break
print(someList)
#https://pt.stackoverflow.com/q/425908/101
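# A shorter contrast between appending the list itself and appending a copy of it:
a = [0, 0]
byReference, byCopy = [], []
for _ in range(2):
    a[0] += 1
    byReference.append(a)    # every entry refers to the same list object
    byCopy.append(a[:])      # each entry is an independent snapshot
print(byReference)           # [[2, 0], [2, 0]]
print(byCopy)                # [[1, 0], [2, 0]]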
|
src/olympia/bandwagon/migrations/0001_initial.py | covariant/addons-server | 843 | 12695084 | # Generated by Django 2.2.5 on 2019-09-12 13:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.fields
import olympia.amo.models
import olympia.translations.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('translations', '__first__'),
('addons', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Collection',
fields=[
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('uuid', models.UUIDField(blank=True, null=True, unique=True)),
('nickname', models.CharField(blank=True, max_length=30, null=True, unique=True)),
('slug', models.CharField(blank=True, max_length=30, null=True)),
('default_locale', models.CharField(db_column='defaultlocale', default='en-US', max_length=10)),
('type', models.PositiveIntegerField(choices=[(0, 'Normal'), (1, 'Synchronized'), (2, 'Featured'), (3, 'Generated Recommendations'), (4, 'Favorites'), (5, 'Mobile'), (6, 'Anonymous')], db_column='collection_type', default=0)),
('listed', models.BooleanField(default=True, help_text='Collections are either listed or private.')),
('application', models.PositiveIntegerField(blank=True, choices=[(1, 'Firefox'), (61, 'Firefox for Android')], db_column='application_id', null=True)),
('addon_count', models.PositiveIntegerField(db_column='addonCount', default=0)),
],
options={
'db_table': 'collections',
'get_latest_by': 'created',
'abstract': False,
'base_manager_name': 'objects',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='FeaturedCollection',
fields=[
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('application', models.PositiveIntegerField(choices=[(1, 'Firefox'), (61, 'Firefox for Android')], db_column='application_id')),
('locale', models.CharField(max_length=10, null=True)),
('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bandwagon.Collection')),
],
options={
'db_table': 'featured_collections',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='CollectionAddon',
fields=[
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
('ordering', models.PositiveIntegerField(default=0, help_text='Add-ons are displayed in ascending order based on this field.')),
('addon', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='addons.Addon')),
('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bandwagon.Collection')),
('comments', olympia.translations.fields.LinkifiedField(blank=True, db_column='comments', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='CollectionAddon_comments_set+', require_locale=True, short=True, to='translations.LinkifiedTranslation', to_field='id', unique=True)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'addons_collections',
'get_latest_by': 'created',
'abstract': False,
'base_manager_name': 'objects',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.AddField(
model_name='collection',
name='addons',
field=models.ManyToManyField(related_name='collections', through='bandwagon.CollectionAddon', to='addons.Addon'),
),
migrations.AddField(
model_name='collection',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='collections', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='collection',
name='description',
field=olympia.translations.fields.NoURLsField(blank=True, db_column='description', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Collection_description_set+', require_locale=False, short=True, to='translations.NoURLsTranslation', to_field='id', unique=True),
),
migrations.AddField(
model_name='collection',
name='name',
field=olympia.translations.fields.TranslatedField(blank=True, db_column='name', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Collection_name_set+', require_locale=False, short=True, to='translations.Translation', to_field='id', unique=True),
),
migrations.AddIndex(
model_name='featuredcollection',
index=models.Index(fields=['application'], name='application_id_idx'),
),
migrations.AddIndex(
model_name='collectionaddon',
index=models.Index(fields=['collection', 'created'], name='created_idx'),
),
migrations.AddIndex(
model_name='collectionaddon',
index=models.Index(fields=['addon'], name='addon_id'),
),
migrations.AddIndex(
model_name='collectionaddon',
index=models.Index(fields=['collection'], name='collection_id'),
),
migrations.AddIndex(
model_name='collectionaddon',
index=models.Index(fields=['user'], name='user_id'),
),
migrations.AddConstraint(
model_name='collectionaddon',
constraint=models.UniqueConstraint(fields=('addon', 'collection'), name='addon_id_2'),
),
migrations.AddIndex(
model_name='collection',
index=models.Index(fields=['application'], name='application_id'),
),
migrations.AddIndex(
model_name='collection',
index=models.Index(fields=['created'], name='created_idx'),
),
migrations.AddIndex(
model_name='collection',
index=models.Index(fields=['listed'], name='listed'),
),
migrations.AddIndex(
model_name='collection',
index=models.Index(fields=['slug'], name='slug_idx'),
),
migrations.AddIndex(
model_name='collection',
index=models.Index(fields=['type'], name='type_idx'),
),
migrations.AddConstraint(
model_name='collection',
constraint=models.UniqueConstraint(fields=('author', 'slug'), name='author_id'),
),
]
|
xam/model_selection/ordered_cross_validation.py | vishalbelsare/xam | 357 | 12695105 | import numpy as np
import pandas as pd
from sklearn.model_selection import BaseCrossValidator
class OrderedCV(BaseCrossValidator):
"""Cross-validation procedure with order of indexes taken into account.
Say you have an interval [a, b] and you want to make n splits with d test
indexes at each split -- for example 7 days. Then DatetimeCV will
return the n following splits:
- [a, b - d], [b - d, b]
- [a, b - 2*d], [b - 2*d, b - d]
- ...
- [a, b - (n-1)*d], [b - (n-1)*d, b - (n-2)*d]
- [a, b - n*d], [b - n*d, (n-1)*b]
Attributes:
n_splits (int): the number of desired splits.
delta (int or datetime.timedelta): the step to increase folds by.
"""
def __init__(self, n_splits, delta):
super().__init__()
self.n_splits = n_splits
self.delta = delta
def split(self, X, y=None, groups=None):
"""
Args:
X (pd.DataFrame): a pd.DataFrame.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError('X is not a pandas.DataFrame')
min_dt = X.index.min()
max_dt = X.index.max()
indices = np.arange(len(X))
for i in range(self.n_splits):
t0 = min_dt
t1 = max_dt - self.delta * (i + 1)
t2 = max_dt - self.delta * i
train_idxs = indices[(X.index >= t0) & (X.index <= t1)]
test_idxs = indices[(X.index > t1) & (X.index <= t2)]
if train_idxs.size == 0:
raise ValueError('No data found in [{}, {}]'.format(t0, t1))
if test_idxs.size == 0:
raise ValueError('No data found in ({}, {}]'.format(t1, t2))
yield train_idxs, test_idxs
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
return self.n_splits
|
Presentations/WG2_11/GlassBR_Code/generated/python/Interpolation.py | Danki567/Drasil | 114 | 12695111 | <filename>Presentations/WG2_11/GlassBR_Code/generated/python/Interpolation.py
from __future__ import print_function
import sys
import math
def lin_interp(x1, y1, x2, y2, x):
y = (((y2 - y1) / (x2 - x1)) * (x - x1)) + y1
return y
def indInSeq(arr, v):
for i in range(len(arr) - 1):
if ((arr[i] <= v) and (v <= arr[i + 1])) :
return i
raise Exception("Index not found")
def matrixCol(mat, c):
col = []
for i in range(len(mat)):
col.append(mat[i][c])
return col
# Interpolate y at (x, z): interpolate over x within the two z-columns that bracket z,
# then interpolate between those two intermediate results over z.
def interpY(x_array, y_array, z_array, x, z):
i = indInSeq(z_array, z)
x_z1 = matrixCol(x_array, i)
y_z1 = matrixCol(y_array, i)
x_z2 = matrixCol(x_array, i + 1)
y_z2 = matrixCol(y_array, i + 1)
try :
j = indInSeq(x_z1, x)
k = indInSeq(x_z2, x)
except Exception as exc :
raise Exception("Interpolation of y failed.")
y1 = lin_interp(x_z1[j], y_z1[j], x_z1[j + 1], y_z1[j + 1], x)
y2 = lin_interp(x_z2[k], y_z2[k], x_z2[k + 1], y_z2[k + 1], x)
return lin_interp(z_array[i], y1, z_array[i + 1], y2, z)
# Inverse lookup: find the z level at which the interpolated curve through the
# (x, y) samples passes through the requested (x, y) point.
def interpZ(x_array, y_array, z_array, x, y):
for i in range(len(z_array) - 1):
x_z1 = matrixCol(x_array, i)
y_z1 = matrixCol(y_array, i)
x_z2 = matrixCol(x_array, i + 1)
y_z2 = matrixCol(y_array, i + 1)
try :
j = indInSeq(x_z1, x)
k = indInSeq(x_z2, x)
except Exception as exc :
continue
y_lower = lin_interp(x_z1[j], y_z1[j], x_z1[j + 1], y_z1[j + 1], x)
y_upper = lin_interp(x_z2[k], y_z2[k], x_z2[k + 1], y_z2[k + 1], x)
if ((y_lower <= y) and (y <= y_upper)) :
return lin_interp(y_lower, z_array[i], y_upper, z_array[i + 1], y)
raise Exception("Interpolation of z failed.")
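if __name__ == "__main__":
    # Small sketch on a made-up 2x2 grid (rows = sample index, columns = z level);
    # all numbers are illustrative and are not GlassBR data.
    x_array = [[0.0, 0.0], [1.0, 1.0]]
    y_array = [[0.0, 10.0], [1.0, 11.0]]
    z_array = [0.0, 1.0]
    print(lin_interp(0.0, 0.0, 1.0, 1.0, 0.25))          # 0.25
    print(interpY(x_array, y_array, z_array, 0.5, 0.5))  # 5.5
    print(interpZ(x_array, y_array, z_array, 0.5, 5.5))  # 0.5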
|
angr/procedures/linux_kernel/brk.py | Kyle-Kyle/angr | 6,132 | 12695122 | import angr
import logging
l = logging.getLogger(name=__name__)
class brk(angr.SimProcedure):
"""
This implements the brk system call.
"""
#pylint:disable=arguments-differ
def run(self, new_brk):
r = self.state.posix.set_brk(new_brk)
l.debug('brk(%s) = %s', new_brk, r)
return r
|
tests/validation/schema/test_enum_validation.py | maroux/flex | 160 | 12695133 | import six
import pytest
from flex.exceptions import ValidationError
from flex.constants import EMPTY
from flex.error_messages import MESSAGES
from tests.utils import (
generate_validator_from_schema,
assert_error_message_equal,
)
#
# enum validation tests
#
@pytest.mark.parametrize(
'letters',
('a', 'b', True, 1, 2),
)
def test_enum_with_valid_array(letters):
schema = {
'enum': [2, 1, 'a', 'b', 'c', True, False],
}
validator = generate_validator_from_schema(schema)
validator(letters)
@pytest.mark.parametrize(
'letters',
(None, 1, 0, 2, 'a'),
)
def test_enum_with_invalid_items(letters):
schema = {
'enum': [True, False, 1.0, 2.0, 'A'],
}
validator = generate_validator_from_schema(schema)
with pytest.raises(ValidationError) as err:
validator(letters)
assert_error_message_equal(
err.value.messages[0]['enum'][0],
MESSAGES['enum']['invalid'],
)
def test_enum_noop_when_not_required_and_field_not_present():
schema = {
'enum': [True, False, 1.0, 2.0, 'A'],
}
validator = generate_validator_from_schema(schema)
validator(EMPTY)
@pytest.mark.parametrize(
'enum_value,value',
(
(six.text_type('test'), six.text_type('test')),
(six.text_type('test'), b'test'),
(b'test', six.text_type('test')),
(b'test', b'test'),
)
)
def test_enum_disperate_text_types(enum_value, value):
schema = {
'enum': [enum_value],
}
validator = generate_validator_from_schema(schema)
validator(value)
|
tests/em/nsem/inversion/test_Problem1D_Adjoint.py | Prithwijit-Chak/simpeg | 358 | 12695135 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
import unittest
from scipy.constants import mu_0
from SimPEG.electromagnetics import natural_source as nsem
from SimPEG import maps
TOL = 1e-4
FLR = 1e-20 # "zero", so if residual below this --> pass regardless of order
CONDUCTIVITY = 1e1
MU = mu_0
def JvecAdjointTest(sigmaHalf, formulation="PrimSec"):
forType = "PrimSec" not in formulation
survey, sigma, sigBG, m1d = nsem.utils.test_utils.setup1DSurvey(
sigmaHalf, tD=forType, structure=False
)
print("Adjoint test of e formulation for {:s} comp \n".format(formulation))
if "PrimSec" in formulation:
problem = nsem.Simulation1DPrimarySecondary(
m1d, survey=survey, sigmaPrimary=sigBG, sigmaMap=maps.IdentityMap(m1d)
)
else:
raise NotImplementedError(
"Only {} formulations are implemented.".format(formulation)
)
m = sigma
u = problem.fields(m)
np.random.seed(1983)
v = np.random.rand(survey.nD,)
# print problem.PropMap.PropModel.nP
w = np.random.rand(problem.mesh.nC,)
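    # Adjoint identity being tested below: for random v and w, v.T (J w) should match
    # w.T (J.T v) up to numerical tolerance when Jvec and Jtvec are mutually adjoint.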
vJw = v.ravel().dot(problem.Jvec(m, w, u))
wJtv = w.ravel().dot(problem.Jtvec(m, v, u))
tol = np.max([TOL * (10 ** int(np.log10(np.abs(vJw)))), FLR])
print(" vJw wJtv vJw - wJtv tol abs(vJw - wJtv) < tol")
print(vJw, wJtv, vJw - wJtv, tol, np.abs(vJw - wJtv) < tol)
return np.abs(vJw - wJtv) < tol
class NSEM_1D_AdjointTests(unittest.TestCase):
def setUp(self):
pass
# Test the adjoint of Jvec and Jtvec
# def test_JvecAdjoint_zxxr(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zxxr',.1))
# def test_JvecAdjoint_zxxi(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zxxi',.1))
# def test_JvecAdjoint_zxyr(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zxyr',.1))
# def test_JvecAdjoint_zxyi(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zxyi',.1))
# def test_JvecAdjoint_zyxr(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zyxr',.1))
# def test_JvecAdjoint_zyxi(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zyxi',.1))
# def test_JvecAdjoint_zyyr(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zyyr',.1))
# def test_JvecAdjoint_zyyi(self):self.assertTrue(JvecAdjointTest(random(1e-2),'zyyi',.1))
def test_JvecAdjoint_All(self):
self.assertTrue(JvecAdjointTest(1e-2))
if __name__ == "__main__":
unittest.main()
|
utils.py | dionysio/haveibeenpwned_lastpass | 101 | 12695139 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import monkeypatch_lastpass
import lastpass
def get_lastpass_vault(username, password, multifactor_password=None):
return lastpass.Vault.open_remote(username, password, multifactor_password) |
tests/xgboost/test_hook_save_scalar_xgboost.py | NRauschmayr/sagemaker-debugger | 133 | 12695154 | # Standard Library
import os
import time
from datetime import datetime
# Third Party
import numpy as np
import pytest
import xgboost
from tests.core.utils import check_tf_events, delete_local_trials, verify_files
# First Party
from smdebug.core.modes import ModeKeys
from smdebug.core.save_config import SaveConfig, SaveConfigMode
from smdebug.xgboost import Hook as XG_Hook
SMDEBUG_XG_HOOK_TESTS_DIR = "/tmp/test_output/smdebug_xg/tests/"
def simple_xg_model(hook, num_round=10, seed=42, with_timestamp=False):
np.random.seed(seed)
train_data = np.random.rand(5, 10)
train_label = np.random.randint(2, size=5)
dtrain = xgboost.DMatrix(train_data, label=train_label)
test_data = np.random.rand(5, 10)
test_label = np.random.randint(2, size=5)
dtest = xgboost.DMatrix(test_data, label=test_label)
params = {}
scalars_to_be_saved = dict()
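    # Note: save_scalar records a named scalar at the given timestamp; with
    # sm_metric=True the value is assumed to also be surfaced as a SageMaker
    # metric (based on the smdebug Hook API used in the calls below).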
ts = time.time()
hook.save_scalar(
"xg_num_steps", num_round, sm_metric=True, timestamp=ts if with_timestamp else None
)
scalars_to_be_saved["scalar/xg_num_steps"] = (ts, num_round)
ts = time.time()
hook.save_scalar(
"xg_before_train", 1, sm_metric=False, timestamp=ts if with_timestamp else None
)
scalars_to_be_saved["scalar/xg_before_train"] = (ts, 1)
hook.set_mode(ModeKeys.TRAIN)
xgboost.train(
params,
dtrain,
evals=[(dtrain, "train"), (dtest, "test")],
num_boost_round=num_round,
callbacks=[hook],
)
ts = time.time()
hook.save_scalar("xg_after_train", 1, sm_metric=False, timestamp=ts if with_timestamp else None)
scalars_to_be_saved["scalar/xg_after_train"] = (ts, 1)
return scalars_to_be_saved
def helper_xgboost_tests(collection, save_config, with_timestamp):
coll_name, coll_regex = collection
run_id = "trial_" + coll_name + "-" + datetime.now().strftime("%Y%m%d-%H%M%S%f")
trial_dir = os.path.join(SMDEBUG_XG_HOOK_TESTS_DIR, run_id)
hook = XG_Hook(
out_dir=trial_dir,
include_collections=[coll_name],
save_config=save_config,
export_tensorboard=True,
)
saved_scalars = simple_xg_model(hook, with_timestamp=with_timestamp)
hook.close()
verify_files(trial_dir, save_config, saved_scalars)
if with_timestamp:
check_tf_events(trial_dir, saved_scalars)
@pytest.mark.parametrize("collection", [("all", ".*"), ("scalars", "^scalar")])
@pytest.mark.parametrize(
"save_config",
[
SaveConfig(save_steps=[0, 2, 4, 6, 8]),
SaveConfig(
{
ModeKeys.TRAIN: SaveConfigMode(save_interval=2),
ModeKeys.GLOBAL: SaveConfigMode(save_interval=3),
ModeKeys.EVAL: SaveConfigMode(save_interval=1),
}
),
],
)
@pytest.mark.parametrize("with_timestamp", [True, False])
def test_xgboost_save_scalar(collection, save_config, with_timestamp):
helper_xgboost_tests(collection, save_config, with_timestamp)
delete_local_trials([SMDEBUG_XG_HOOK_TESTS_DIR])
|
lldb/test/API/lang/swift/foundation_value_types/global/TestSwiftFoundationTypeGlobal.py | LaudateCorpus1/llvm-project | 605 | 12695168 | <reponame>LaudateCorpus1/llvm-project
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftFoundationValueTypeGlobal(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@swiftTest
@skipUnlessFoundation
def test(self):
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact())
self.assertTrue(target, VALID_TARGET)
# Target variable without a process.
# This is not actually expected to work, but also shouldn't crash.
self.expect("target variable g_url")
|
graphene_sqlalchemy/fields.py | oneome-llc/graphene-sqlalchemy | 947 | 12695183 | <reponame>oneome-llc/graphene-sqlalchemy
import enum
import warnings
from functools import partial
from promise import Promise, is_thenable
from sqlalchemy.orm.query import Query
from graphene import NonNull
from graphene.relay import Connection, ConnectionField
from graphene.relay.connection import connection_adapter, page_info_adapter
from graphql_relay.connection.arrayconnection import \
connection_from_array_slice
from .batching import get_batch_resolver
from .utils import EnumValue, get_query
class UnsortedSQLAlchemyConnectionField(ConnectionField):
@property
def type(self):
from .types import SQLAlchemyObjectType
type_ = super(ConnectionField, self).type
nullable_type = get_nullable_type(type_)
if issubclass(nullable_type, Connection):
return type_
assert issubclass(nullable_type, SQLAlchemyObjectType), (
"SQLALchemyConnectionField only accepts SQLAlchemyObjectType types, not {}"
).format(nullable_type.__name__)
assert (
nullable_type.connection
), "The type {} doesn't have a connection".format(
nullable_type.__name__
)
assert type_ == nullable_type, (
"Passing a SQLAlchemyObjectType instance is deprecated. "
"Pass the connection type instead accessible via SQLAlchemyObjectType.connection"
)
return nullable_type.connection
@property
def model(self):
return get_nullable_type(self.type)._meta.node._meta.model
@classmethod
def get_query(cls, model, info, **args):
return get_query(model, info.context)
@classmethod
def resolve_connection(cls, connection_type, model, info, args, resolved):
if resolved is None:
resolved = cls.get_query(model, info, **args)
if isinstance(resolved, Query):
_len = resolved.count()
else:
_len = len(resolved)
def adjusted_connection_adapter(edges, pageInfo):
return connection_adapter(connection_type, edges, pageInfo)
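        # connection_from_array_slice applies the relay pagination arguments
        # (first/last/before/after) over the resolved iterable; passing the full
        # length as both array_length and array_slice_length paginates the
        # entire result set in memory.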
connection = connection_from_array_slice(
array_slice=resolved,
args=args,
slice_start=0,
array_length=_len,
array_slice_length=_len,
connection_type=adjusted_connection_adapter,
edge_type=connection_type.Edge,
page_info_type=page_info_adapter,
)
connection.iterable = resolved
connection.length = _len
return connection
@classmethod
def connection_resolver(cls, resolver, connection_type, model, root, info, **args):
resolved = resolver(root, info, **args)
on_resolve = partial(cls.resolve_connection, connection_type, model, info, args)
if is_thenable(resolved):
return Promise.resolve(resolved).then(on_resolve)
return on_resolve(resolved)
def wrap_resolve(self, parent_resolver):
return partial(
self.connection_resolver,
parent_resolver,
get_nullable_type(self.type),
self.model,
)
# TODO Rename this to SortableSQLAlchemyConnectionField
class SQLAlchemyConnectionField(UnsortedSQLAlchemyConnectionField):
def __init__(self, type_, *args, **kwargs):
nullable_type = get_nullable_type(type_)
if "sort" not in kwargs and issubclass(nullable_type, Connection):
# Let super class raise if type is not a Connection
try:
kwargs.setdefault("sort", nullable_type.Edge.node._type.sort_argument())
except (AttributeError, TypeError):
raise TypeError(
'Cannot create sort argument for {}. A model is required. Set the "sort" argument'
" to None to disabling the creation of the sort query argument".format(
nullable_type.__name__
)
)
elif "sort" in kwargs and kwargs["sort"] is None:
del kwargs["sort"]
super(SQLAlchemyConnectionField, self).__init__(type_, *args, **kwargs)
@classmethod
def get_query(cls, model, info, sort=None, **args):
query = get_query(model, info.context)
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
sort_args = []
# ensure consistent handling of graphene Enums, enum values and
# plain strings
for item in sort:
if isinstance(item, enum.Enum):
sort_args.append(item.value.value)
elif isinstance(item, EnumValue):
sort_args.append(item.value)
else:
sort_args.append(item)
query = query.order_by(*sort_args)
return query
class BatchSQLAlchemyConnectionField(UnsortedSQLAlchemyConnectionField):
"""
This is currently experimental.
The API and behavior may change in future versions.
Use at your own risk.
"""
def wrap_resolve(self, parent_resolver):
return partial(
self.connection_resolver,
self.resolver,
get_nullable_type(self.type),
self.model,
)
@classmethod
def from_relationship(cls, relationship, registry, **field_kwargs):
model = relationship.mapper.entity
model_type = registry.get_type_for_model(model)
return cls(model_type.connection, resolver=get_batch_resolver(relationship), **field_kwargs)
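# Illustrative sketch (not part of this module): the batching field is typically
# wired in through a type's Meta, e.g.
#
#     class DepartmentType(SQLAlchemyObjectType):  # hypothetical model/type names
#         class Meta:
#             model = Department
#             interfaces = (relay.Node,)
#             connection_field_factory = BatchSQLAlchemyConnectionField.from_relationship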
def default_connection_field_factory(relationship, registry, **field_kwargs):
model = relationship.mapper.entity
model_type = registry.get_type_for_model(model)
return __connectionFactory(model_type, **field_kwargs)
# TODO Remove in next major version
__connectionFactory = UnsortedSQLAlchemyConnectionField
def createConnectionField(type_, **field_kwargs):
warnings.warn(
'createConnectionField is deprecated and will be removed in the next '
'major version. Use SQLAlchemyObjectType.Meta.connection_field_factory instead.',
DeprecationWarning,
)
return __connectionFactory(type_, **field_kwargs)
def registerConnectionFieldFactory(factoryMethod):
warnings.warn(
'registerConnectionFieldFactory is deprecated and will be removed in the next '
'major version. Use SQLAlchemyObjectType.Meta.connection_field_factory instead.',
DeprecationWarning,
)
global __connectionFactory
__connectionFactory = factoryMethod
def unregisterConnectionFieldFactory():
warnings.warn(
'registerConnectionFieldFactory is deprecated and will be removed in the next '
'major version. Use SQLAlchemyObjectType.Meta.connection_field_factory instead.',
DeprecationWarning,
)
global __connectionFactory
__connectionFactory = UnsortedSQLAlchemyConnectionField
def get_nullable_type(_type):
if isinstance(_type, NonNull):
return _type.of_type
return _type
|
examples/plot_calibration_curve.py | leozhoujf/scikit-plot | 2360 | 12695191 | """
An example showing the plot_calibration_curve method
applied to several scikit-learn classifiers
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt
import scikitplot as skplt
X, y = make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2,
random_state=20)
X_train, y_train, X_test, y_test = X[:1000], y[:1000], X[1000:], y[1000:]
rf_probas = RandomForestClassifier().fit(X_train, y_train).predict_proba(X_test)
lr_probas = LogisticRegression().fit(X_train, y_train).predict_proba(X_test)
nb_probas = GaussianNB().fit(X_train, y_train).predict_proba(X_test)
sv_scores = LinearSVC().fit(X_train, y_train).decision_function(X_test)
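# LinearSVC has no predict_proba, so raw decision_function scores are passed instead;
# plot_calibration_curve accepts these alongside probability arrays (rescaling them
# to [0, 1] is assumed to happen inside scikit-plot).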
probas_list = [rf_probas, lr_probas, nb_probas, sv_scores]
clf_names=['Random Forest',
'Logistic Regression',
'Gaussian Naive Bayes',
'Support Vector Machine']
skplt.metrics.plot_calibration_curve(y_test,
probas_list=probas_list,
clf_names=clf_names,
n_bins=10)
plt.show()
|