max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
code/tests/__init__.py | teamiceberg/python-machine-learning-book-2nd-edition | 6,947 | 11138410 | # <NAME> 2016-2017
#
# ann is a supporting package for the book
# "Introduction to Artificial Neural Networks and Deep Learning:
# A Practical Guide with Applications in Python"
#
# Author: <NAME> <<EMAIL>>
#
# License: MIT
|
dash-set-height-of-graph.py | oriolmirosa/dash-recipes | 932 | 11138412 | import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import pandas as pd
stats= {
'Goals': [43,32,19,32,20,15,20,19,19,21,19,8,14],
'Assists': [67, 55, 63, 39, 36, 34, 23, 21, 20, 16, 15, 26, 17],
'Points' : [110, 86, 82, 71, 56, 49, 43, 40, 39, 37, 34, 34, 31],
'Shots' : [223, 202, 167, 172, 147, 115, 135, 103, 119, 112, 120, 101, 102]
}
df = pd.DataFrame(stats)
#button options
options = [{'label': str(i), 'value':str(i)} for i in ['Goals','Assists','Points','Shots']]
#grid layout
container= {
'display':'grid',
'grid-template-columns':'repeat(2, 1fr)',
'grid-template-rows':'repeat(2, minmax(100px, auto))',
    'grid-gap': '1em',
}
#placeholder layout
placeholder = {
'background': "#eee",
'border-style':'solid',
    'border-width':'0.5em',
'border-color':'#ddd'
}
app = dash.Dash('')
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
app.layout = html.Div([
# grid 1,1
html.Div([
html.Div(
dcc.Dropdown(
id='category',
options= options,
value= 'Goals'
),
),
html.Div([
dcc.Graph(
id='histogram',
style={'height':'80vh', 'width':'55vw'}
)
]),
# grid 1,2
html.Div('placeholder', style=placeholder)
]),
#grid 2,1
html.Div([
html.Div([
#feature dropdown
dcc.Dropdown(
id='xvalue',
options=options,
                value='Goals'
),
dcc.Dropdown(
id='yvalue',
options=options,
                value='Goals'
),
]),
html.Div(
dcc.Graph( id ='scatter' , style={'height':'80vh', 'width':'55vw'})
)
]),
#grid 2,2
html.Div(
'placeholder', style=placeholder )
], style=container )
#histogram callback
@app.callback(
dash.dependencies.Output('histogram', 'figure'),
[dash.dependencies.Input('category', 'value')]
)
def update_histogram(value):
return {
'data':[
go.Histogram(
x=df[value]
)],
'layout':go.Layout()
}
@app.callback(
dash.dependencies.Output('scatter', 'figure'),
[dash.dependencies.Input('xvalue', 'value'),
dash.dependencies.Input('yvalue', 'value')]
)
def update_scatter(xvalue,yvalue):
return {
'data': [
go.Scatter(
x=df[xvalue],
y=df[yvalue],
mode='markers'
)
] ,
'layout' : go.Layout( )
}
if __name__ == '__main__':
app.run_server(debug=True)
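# Usage note (not part of the original recipe): running `python dash-set-height-of-graph.py`
# starts Dash's development server, which by default serves the app at
# http://127.0.0.1:8050/.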
|
scikit_learn_bring_your_own_model_local_serving/scikit_learn_bring_your_own_model_local_serving.py | aws-samples/amazon-sagemaker-local-mode | 111 | 11138437 | # This is a sample Python program that serves a scikit-learn model pre-trained on the Boston Housing dataset.
# This implementation will work on your *local computer* or in the *AWS Cloud*.
#
# Prerequisites:
# 1. Install required Python packages:
# `pip install -r requirements.txt`
# 2. Docker Desktop installed and running on your computer:
# `docker ps`
# 3. You should have AWS credentials configured on your local machine
# in order to be able to pull the docker image from ECR.
###############################################################################################
import boto3
import pandas as pd
import tarfile
from sagemaker.sklearn import SKLearnModel
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
DUMMY_IAM_ROLE = 'arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001'
s3 = boto3.client('s3')
def main():
# Prepare data for model inference - we use the Boston housing dataset
print('Preparing data for model inference')
data = load_boston()
X_train, X_test, y_train, y_test = train_test_split(
data.data, data.target, test_size=0.25, random_state=42
)
# we don't train a model, so we will need only the testing data
testX = pd.DataFrame(X_test, columns=data.feature_names)
# Download a pre-trained model file
print('Downloading a pre-trained model file')
s3.download_file('aws-ml-blog', 'artifacts/scikit_learn_bring_your_own_model/model.joblib', 'model.joblib')
# Creating a model.tar.gz file
tar = tarfile.open('model.tar.gz', 'w:gz')
tar.add('model.joblib')
tar.close()
model = SKLearnModel(
role=DUMMY_IAM_ROLE,
model_data='file://./model.tar.gz',
framework_version='0.23-1',
py_version='py3',
source_dir='code',
entry_point='inference.py'
)
print('Deploying endpoint in local mode')
predictor = model.deploy(initial_instance_count=1, instance_type='local')
    predictions = predictor.predict(testX.values)
    print("Predictions: {}".format(predictions))
print('About to delete the endpoint to stop paying (if in cloud mode).')
predictor.delete_endpoint(predictor.endpoint_name)
if __name__ == "__main__":
main()
|
openaerostruct/structures/disp.py | lamkina/OpenAeroStruct | 114 | 11138448 |
import numpy as np
import openmdao.api as om
class Disp(om.ExplicitComponent):
"""
Reshape the flattened displacements from the linear system solution into
a 2D array so we can more easily use the results.
    The solution to the linear system has meaningless entries due to the
constraints on the FEM model. The displacements from this portion of
the linear system are not needed, so we select only the relevant
portion of the displacements for further calculations.
Parameters
----------
disp_aug[6*(ny+1)] : numpy array
Augmented displacement array. Obtained by solving the system
K * disp_aug = forces, where forces is a flattened version of loads.
Returns
-------
disp[6*ny] : numpy array
Actual displacement array formed by truncating disp_aug.
"""
def initialize(self):
self.options.declare("surface", types=dict)
def setup(self):
surface = self.options["surface"]
self.ny = surface["mesh"].shape[1]
self.add_input("disp_aug", val=np.zeros(((self.ny + 1) * 6)), units="m")
self.add_output("disp", val=np.zeros((self.ny, 6)), units="m")
n = self.ny * 6
arange = np.arange((n))
self.declare_partials("disp", "disp_aug", val=1.0, rows=arange, cols=arange)
def compute(self, inputs, outputs):
# Obtain the relevant portions of disp_aug and store the reshaped
# displacements in disp
outputs["disp"] = inputs["disp_aug"][:-6].reshape((-1, 6))
|
docs/examples/userguide/numpy_tutorial/compute_py.py | smok-serwis/cython | 6,663 | 11138454 | import numpy as np
def clip(a, min_value, max_value):
return min(max(a, min_value), max_value)
def compute(array_1, array_2, a, b, c):
"""
This function must implement the formula
np.clip(array_1, 2, 10) * a + array_2 * b + c
array_1 and array_2 are 2D.
"""
x_max = array_1.shape[0]
y_max = array_1.shape[1]
assert array_1.shape == array_2.shape
result = np.zeros((x_max, y_max), dtype=array_1.dtype)
for x in range(x_max):
for y in range(y_max):
tmp = clip(array_1[x, y], 2, 10)
tmp = tmp * a + array_2[x, y] * b
result[x, y] = tmp + c
return result
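# Illustrative self-check (not part of the original tutorial file): the pure-Python
# compute() above should agree with the vectorised NumPy formula documented in its docstring.
if __name__ == "__main__":
    a1 = np.arange(20).reshape(4, 5)
    a2 = np.ones((4, 5), dtype=int)
    expected = np.clip(a1, 2, 10) * 3 + a2 * 2 + 7
    assert np.array_equal(compute(a1, a2, 3, 2, 7), expected)
    print("compute() matches the documented NumPy formula")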
|
crypto-primitive/solve.py | sixstars/starctf2018 | 140 | 11138458 |
from pwn import *
import string
from hashlib import sha256
#context.log_level='debug'
def dopow():
chal = c.recvline()
post = chal[12:28]
tar = chal[33:-1]
c.recvuntil(':')
found = iters.bruteforce(lambda x:sha256(x+post).hexdigest()==tar, string.ascii_letters+string.digits, 4)
c.sendline(found)
#c = remote('127.0.0.1',10001)
c = remote('192.168.127.12',10001)
dopow()
pt='GoodCipher'
def doswap(a,b):
if a==b:
return
if a>b:
tmp=b
b=a
a=tmp
ans=[]
ans.append((0,256-a))
b-=a
a=0
while b!=1:
tmp=0
lo=1
while b&lo==0:
lo<<=1
tmp+=1
if b==lo:
ans.append((1,8-tmp))
break
if tmp!=0:
ans.append((1,8-tmp))
b>>=tmp
ans.append((2,1))
b^=1
ans.append((0,255))
b-=1
ans.append((0,254))
for a,b in ans:
c.sendline('%d %d'%(a,b))
c.recvline()
for a,b in [(0,2),(1,7),(0,255),(1,1)]:
c.sendline('%d %d'%(a,b))
c.recvline()
for a,b in ans[::-1]:
if a==0:
c.sendline('%d %d'%(a,256-b))
elif a==1:
c.sendline('%d %d'%(a,8-b))
elif a==2:
c.sendline('%d %d'%(a,b))
c.recvline()
for i in range(3):
print i
m=range(256)
c.recvuntil('ciphertext is ')
ct=c.recvline().strip()
ct=ct.decode('hex')
assert len(ct)==10
for i in range(10):
a=ord(ct[i])
b=ord(pt[i])
#print m[a],b
doswap(m[a],b)
for j in range(256):
if m[j]==b:
m[j]=m[a]
m[a]=b
break
c.sendline('-1')
c.recvuntil('Your flag here.\n')
print c.recvline()
|
adabins/evaluate.py | rosivbus/aphantasia | 579 | 11138461 |
import argparse
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from tqdm import tqdm
import model_io
from dataloader import DepthDataLoader
from models import UnetAdaptiveBins
from utils import RunningAverageDict
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log,
silog=silog, sq_rel=sq_rel)
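# Illustrative sanity check (not part of the original file): for a perfect prediction
# the ratio-based metrics collapse to their ideal values, e.g. with
#   gt = np.array([1.0, 2.0, 4.0]); m = compute_errors(gt, gt.copy())
# one gets m['a1'] == 1.0 and m['abs_rel'] == m['rmse'] == 0.0.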
# def denormalize(x, device='cpu'):
# mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
# std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
# return x * std + mean
#
def predict_tta(model, image, args):
pred = model(image)[-1]
# pred = utils.depth_norm(pred)
# pred = nn.functional.interpolate(pred, depth.shape[-2:], mode='bilinear', align_corners=True)
# pred = np.clip(pred.cpu().numpy(), 10, 1000)/100.
pred = np.clip(pred.cpu().numpy(), args.min_depth, args.max_depth)
image = torch.Tensor(np.array(image.cpu().numpy())[..., ::-1].copy()).to(device)
pred_lr = model(image)[-1]
# pred_lr = utils.depth_norm(pred_lr)
# pred_lr = nn.functional.interpolate(pred_lr, depth.shape[-2:], mode='bilinear', align_corners=True)
# pred_lr = np.clip(pred_lr.cpu().numpy()[...,::-1], 10, 1000)/100.
pred_lr = np.clip(pred_lr.cpu().numpy()[..., ::-1], args.min_depth, args.max_depth)
final = 0.5 * (pred + pred_lr)
final = nn.functional.interpolate(torch.Tensor(final), image.shape[-2:], mode='bilinear', align_corners=True)
return torch.Tensor(final)
def eval(model, test_loader, args, gpus=None):
if gpus is None:
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
else:
device = gpus[0]
if args.save_dir is not None:
os.makedirs(args.save_dir)
metrics = RunningAverageDict()
# crop_size = (471 - 45, 601 - 41)
# bins = utils.get_bins(100)
total_invalid = 0
with torch.no_grad():
model.eval()
sequential = test_loader
for batch in tqdm(sequential):
image = batch['image'].to(device)
gt = batch['depth'].to(device)
final = predict_tta(model, image, args)
final = final.squeeze().cpu().numpy()
# final[final < args.min_depth] = args.min_depth
# final[final > args.max_depth] = args.max_depth
final[np.isinf(final)] = args.max_depth
final[np.isnan(final)] = args.min_depth
if args.save_dir is not None:
if args.dataset == 'nyu':
impath = f"{batch['image_path'][0].replace('/', '__').replace('.jpg', '')}"
factor = 1000
else:
dpath = batch['image_path'][0].split('/')
impath = dpath[1] + "_" + dpath[-1]
impath = impath.split('.')[0]
factor = 256
# rgb_path = os.path.join(rgb_dir, f"{impath}.png")
# tf.ToPILImage()(denormalize(image.squeeze().unsqueeze(0).cpu()).squeeze()).save(rgb_path)
pred_path = os.path.join(args.save_dir, f"{impath}.png")
pred = (final * factor).astype('uint16')
Image.fromarray(pred).save(pred_path)
if 'has_valid_depth' in batch:
if not batch['has_valid_depth']:
# print("Invalid ground truth")
total_invalid += 1
continue
gt = gt.squeeze().cpu().numpy()
valid_mask = np.logical_and(gt > args.min_depth, gt < args.max_depth)
if args.garg_crop or args.eigen_crop:
gt_height, gt_width = gt.shape
eval_mask = np.zeros(valid_mask.shape)
if args.garg_crop:
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height),
int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
elif args.eigen_crop:
if args.dataset == 'kitti':
eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height),
int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
else:
eval_mask[45:471, 41:601] = 1
valid_mask = np.logical_and(valid_mask, eval_mask)
# gt = gt[valid_mask]
# final = final[valid_mask]
metrics.update(compute_errors(gt[valid_mask], final[valid_mask]))
print(f"Total invalid: {total_invalid}")
metrics = {k: round(v, 3) for k, v in metrics.get_value().items()}
print(f"Metrics: {metrics}")
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield str(arg)
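# Usage note (not part of the original file): because the parser below is built with
# fromfile_prefix_chars='@' and a single command-line argument is treated as an
# arguments file, the evaluator can be invoked as
#   python evaluate.py @args_test.txt
# where args_test.txt is a hypothetical text file listing "--flag value" pairs, split
# into individual tokens by convert_arg_line_to_args above.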
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser(description='Model evaluator', fromfile_prefix_chars='@',
conflict_handler='resolve')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--n-bins', '--n_bins', default=256, type=int,
help='number of bins/buckets to divide depth range into')
parser.add_argument('--gpu', default=None, type=int, help='Which gpu to use')
parser.add_argument('--save-dir', '--save_dir', default=None, type=str, help='Store predictions in folder')
parser.add_argument("--root", default=".", type=str,
help="Root folder to save data in")
parser.add_argument("--dataset", default='nyu', type=str, help="Dataset to train on")
parser.add_argument("--data_path", default='../dataset/nyu/sync/', type=str,
help="path to dataset")
parser.add_argument("--gt_path", default='../dataset/nyu/sync/', type=str,
help="path to dataset gt")
parser.add_argument('--filenames_file',
default="./train_test_inputs/nyudepthv2_train_files_with_gt.txt",
type=str, help='path to the filenames text file')
parser.add_argument('--input_height', type=int, help='input height', default=416)
parser.add_argument('--input_width', type=int, help='input width', default=544)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=10)
parser.add_argument('--min_depth', type=float, help='minimum depth in estimation', default=1e-3)
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--data_path_eval',
default="../dataset/nyu/official_splits/test/",
type=str, help='path to the data for online evaluation')
parser.add_argument('--gt_path_eval', default="../dataset/nyu/official_splits/test/",
type=str, help='path to the groundtruth data for online evaluation')
parser.add_argument('--filenames_file_eval',
default="./train_test_inputs/nyudepthv2_test_files_with_gt.txt",
type=str, help='path to the filenames text file for online evaluation')
parser.add_argument('--checkpoint_path', '--checkpoint-path', type=str, required=True,
help="checkpoint file to use for prediction")
parser.add_argument('--min_depth_eval', type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth for evaluation', default=10)
parser.add_argument('--eigen_crop', help='if set, crops according to Eigen NIPS14', action='store_true')
parser.add_argument('--garg_crop', help='if set, crops according to Garg ECCV16', action='store_true')
parser.add_argument('--do_kb_crop', help='Use kitti benchmark cropping', action='store_true')
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
# args = parser.parse_args()
args.gpu = int(args.gpu) if args.gpu is not None else 0
args.distributed = False
device = torch.device('cuda:{}'.format(args.gpu))
test = DepthDataLoader(args, 'online_eval').data
model = UnetAdaptiveBins.build(n_bins=args.n_bins, min_val=args.min_depth, max_val=args.max_depth,
norm='linear').to(device)
model = model_io.load_checkpoint(args.checkpoint_path, model)[0]
model = model.eval()
eval(model, test, args, gpus=[device])
|
demos/python/sdk_wireless_camera_control/tests/e2e/test_gopro_wifi_commands_e2e.py | Natureshadow/OpenGoPro | 210 | 11138463 | # test_gopro_wifi_commands_e2e.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue Sep 7 21:35:52 UTC 2021
"""End to end testing of GoPro BLE functionality"""
from pathlib import Path
import pytest
from open_gopro import GoPro
from open_gopro.api import api_versions
@pytest.mark.asyncio
async def test_get_media_list(gopro_ble_and_wifi: GoPro):
assert gopro_ble_and_wifi.wifi_command.get_media_list().is_ok
@pytest.mark.asyncio
async def test_get_state(gopro_ble_and_wifi: GoPro):
assert gopro_ble_and_wifi.wifi_command.get_camera_state().is_ok
@pytest.mark.asyncio
async def test_get_version(gopro_ble_and_wifi: GoPro):
result = gopro_ble_and_wifi.wifi_command.get_open_gopro_api_version()
assert result.is_ok
assert result.flatten in list(api_versions.keys())
@pytest.mark.asyncio
async def test_set_turbo_mode(gopro_ble_and_wifi: GoPro):
print("Setting turbo mode on")
assert gopro_ble_and_wifi.wifi_command.set_turbo_mode(gopro_ble_and_wifi.params.Toggle.ENABLE).is_ok
print("Setting turbo mode off")
assert gopro_ble_and_wifi.wifi_command.set_turbo_mode(gopro_ble_and_wifi.params.Toggle.DISABLE).is_ok
@pytest.mark.asyncio
async def test_set_resolution(gopro_ble_and_wifi: GoPro):
if gopro_ble_and_wifi.version >= 2.0:
assert gopro_ble_and_wifi.ble_setting.video_performance_mode.set(
gopro_ble_and_wifi.params.VideoPerformanceMode.MAX_PERFORMANCE
).is_ok
assert gopro_ble_and_wifi.ble_setting.max_lens_mode.set(
gopro_ble_and_wifi.params.MaxLensMode.DEFAULT
).is_ok
assert gopro_ble_and_wifi.wifi_command.set_preset(gopro_ble_and_wifi.params.Preset.CINEMATIC).is_ok
for resolution in gopro_ble_and_wifi.params.Resolution:
print(f"Setting resolution to {resolution.name}")
assert gopro_ble_and_wifi.wifi_setting.resolution.set(resolution).is_ok
@pytest.mark.asyncio
async def test_download_photo(gopro_ble_and_wifi: GoPro):
media_list = gopro_ble_and_wifi.wifi_command.get_media_list()["media"][0]["fs"]
# Find a picture and download it
picture = ""
for file in [x["n"] for x in media_list]:
if file.lower().endswith(".jpg"):
picture = file
write_location = Path("out.jpg")
gopro_ble_and_wifi.wifi_command.download_file(camera_file=picture, local_file=write_location)
assert write_location.exists()
return
assert False
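# Usage note (not part of the original test module): these end-to-end tests depend on a
# `gopro_ble_and_wifi` pytest fixture (defined elsewhere in the test suite) that yields a
# GoPro object already connected over BLE and WiFi, so they are intended to be run
# against a real camera rather than in an unattended CI job.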
|
concordia/migrations/0032_topic_ordering.py | juliecentofanti172/juliecentofanti.github.io | 134 | 11138470 | # Generated by Django 2.2 on 2019-05-29 18:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("concordia", "0031_auto_20190509_1142")]
operations = [
migrations.AddField(
model_name="topic",
name="ordering",
field=models.IntegerField(
default=0,
help_text="Sort order override: lower values will be listed first",
),
)
]
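# Usage note (not part of the generated migration): Django applies a migration like this
# one through its standard management command, e.g. `python manage.py migrate concordia`.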
|
sqlacodegen/codegen.py | danerlt/flask-sqlacodegen | 265 | 11138477 |
"""Contains the code generation logic and helper functions."""
from __future__ import unicode_literals, division, print_function, absolute_import
from collections import defaultdict
from inspect import ArgSpec
from keyword import iskeyword
import inspect
import sys
import re
from sqlalchemy import (Enum, ForeignKeyConstraint, PrimaryKeyConstraint, CheckConstraint, UniqueConstraint, Table,
Column)
from sqlalchemy.schema import ForeignKey
from sqlalchemy.util import OrderedDict
from sqlalchemy.types import Boolean, String
import sqlalchemy
try:
from sqlalchemy.sql.expression import text, TextClause
except ImportError:
# SQLAlchemy < 0.8
from sqlalchemy.sql.expression import text, _TextClause as TextClause
_re_boolean_check_constraint = re.compile(r"(?:(?:.*?)\.)?(.*?) IN \(0, 1\)")
_re_column_name = re.compile(r'(?:(["`]?)(?:.*)\1\.)?(["`]?)(.*)\2')
_re_enum_check_constraint = re.compile(r"(?:(?:.*?)\.)?(.*?) IN \((.+)\)")
_re_enum_item = re.compile(r"'(.*?)(?<!\\)'")
_re_invalid_identifier = re.compile(r'[^a-zA-Z0-9_]' if sys.version_info[0] < 3 else r'(?u)\W')
_re_first_cap = re.compile('(.)([A-Z][a-z]+)')
_re_all_cap = re.compile('([a-z0-9])([A-Z])')
_flask_prepend = 'db.'
class _DummyInflectEngine(object):
def singular_noun(self, noun):
return noun
def plural_noun(self, noun): # needed for backrefs
import inflect
inflect_engine = inflect.engine()
return inflect_engine.plural_noun(noun)
# In SQLAlchemy 0.x, constraint.columns is sometimes a list, on 1.x onwards, always a ColumnCollection
def _get_column_names(constraint):
if isinstance(constraint.columns, list):
return constraint.columns
return list(constraint.columns.keys())
def _convert_to_valid_identifier(name):
assert name, 'Identifier cannot be empty'
if name[0].isdigit() or iskeyword(name):
name = '_' + name
return _re_invalid_identifier.sub('_', name)
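# Illustrative examples (not in the original source):
#   _convert_to_valid_identifier('2nd-table') -> '_2nd_table'
#   _convert_to_valid_identifier('class') -> '_class' (keywords get a leading underscore)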
def _get_compiled_expression(statement):
"""Returns the statement in a form where any placeholders have been filled in."""
if isinstance(statement, TextClause):
return statement.text
dialect = statement._from_objects[0].bind.dialect
compiler = statement._compiler(dialect)
# Adapted from http://stackoverflow.com/a/5698357/242021
class LiteralCompiler(compiler.__class__):
def visit_bindparam(self, bindparam, within_columns_clause=False, literal_binds=False, **kwargs):
return super(LiteralCompiler, self).render_literal_bindparam(
bindparam, within_columns_clause=within_columns_clause,
literal_binds=literal_binds, **kwargs
)
compiler = LiteralCompiler(dialect, statement)
return compiler.process(statement)
def _get_constraint_sort_key(constraint):
if isinstance(constraint, CheckConstraint):
return 'C{0}'.format(constraint.sqltext)
return constraint.__class__.__name__[0] + repr(_get_column_names(constraint))
def _get_common_fk_constraints(table1, table2):
"""Returns a set of foreign key constraints the two tables have against each other."""
c1 = set(c for c in table1.constraints if isinstance(c, ForeignKeyConstraint) and
c.elements[0].column.table == table2)
c2 = set(c for c in table2.constraints if isinstance(c, ForeignKeyConstraint) and
c.elements[0].column.table == table1)
return c1.union(c2)
def _getargspec_init(method):
try:
return inspect.getargspec(method)
except TypeError:
if method is object.__init__:
return ArgSpec(['self'], None, None, None)
else:
return ArgSpec(['self'], 'args', 'kwargs', None)
def _render_column_type(coltype):
args = []
if isinstance(coltype, Enum):
args.extend(repr(arg) for arg in coltype.enums)
if coltype.name is not None:
args.append('name={0!r}'.format(coltype.name))
else:
# All other types
argspec = _getargspec_init(coltype.__class__.__init__)
defaults = dict(zip(argspec.args[-len(argspec.defaults or ()):], argspec.defaults or ()))
missing = object()
use_kwargs = False
for attr in argspec.args[1:]:
# Remove annoyances like _warn_on_bytestring
if attr.startswith('_'):
continue
value = getattr(coltype, attr, missing)
default = defaults.get(attr, missing)
if value is missing or value == default:
use_kwargs = True
elif use_kwargs:
args.append('{0}={1}'.format(attr, repr(value)))
else:
args.append(repr(value))
text = _flask_prepend + coltype.__class__.__name__
    # In case of ENUM from sqlalchemy.dialects, Flask-SQLAlchemy uses db.Enum
if text == "db.ENUM":
text = "db.Enum"
if args:
text += '({0})'.format(', '.join(args))
return text
def _render_column(column, show_name):
kwarg = []
is_sole_pk = column.primary_key and len(column.table.primary_key) == 1
dedicated_fks = [c for c in column.foreign_keys if len(c.constraint.columns) == 1]
is_unique = any(isinstance(c, UniqueConstraint) and set(c.columns) == set([column])
for c in column.table.constraints)
is_unique = is_unique or any(i.unique and set(i.columns) == set([column]) for i in column.table.indexes)
has_index = any(set(i.columns) == set([column]) for i in column.table.indexes)
# Render the column type if there are no foreign keys on it or any of them points back to itself
render_coltype = not dedicated_fks or any(fk.column is column for fk in dedicated_fks)
if column.key != column.name:
kwarg.append('key')
if column.primary_key:
kwarg.append('primary_key')
if not column.nullable and not is_sole_pk:
kwarg.append('nullable')
if is_unique:
column.unique = True
kwarg.append('unique')
elif has_index:
column.index = True
kwarg.append('index')
if column.server_default:
server_default = 'server_default=' + _flask_prepend + 'FetchedValue()'
comment = getattr(column, 'comment', None)
return _flask_prepend + 'Column({0})'.format(', '.join(
([repr(column.name)] if show_name else []) +
([_render_column_type(column.type)] if render_coltype else []) +
[_render_constraint(x) for x in dedicated_fks] +
[repr(x) for x in column.constraints] +
['{0}={1}'.format(k, repr(getattr(column, k))) for k in kwarg] +
([server_default] if column.server_default else []) +
(['info={!r}'.format(comment)] if comment else [])
))
def _render_constraint(constraint):
def render_fk_options(*opts):
opts = [repr(opt) for opt in opts]
for attr in 'ondelete', 'onupdate', 'deferrable', 'initially', 'match':
value = getattr(constraint, attr, None)
if value:
opts.append('{0}={1!r}'.format(attr, value))
return ', '.join(opts)
if isinstance(constraint, ForeignKey):
remote_column = '{0}.{1}'.format(constraint.column.table.fullname, constraint.column.name)
return _flask_prepend + 'ForeignKey({0})'.format(render_fk_options(remote_column))
elif isinstance(constraint, ForeignKeyConstraint):
local_columns = _get_column_names(constraint)
remote_columns = ['{0}.{1}'.format(fk.column.table.fullname, fk.column.name)
for fk in constraint.elements]
return _flask_prepend + 'ForeignKeyConstraint({0})'.format(render_fk_options(local_columns, remote_columns))
elif isinstance(constraint, CheckConstraint):
return _flask_prepend + 'CheckConstraint({0!r})'.format(_get_compiled_expression(constraint.sqltext))
elif isinstance(constraint, UniqueConstraint):
columns = [repr(col.name) for col in constraint.columns]
return _flask_prepend + 'UniqueConstraint({0})'.format(', '.join(columns))
def _underscore(name):
"""Converts CamelCase to camel_case. See http://stackoverflow.com/questions/1175208"""
s1 = _re_first_cap.sub(r'\1_\2', name)
return _re_all_cap.sub(r'\1_\2', s1).lower()
def _is_model_descendant(model_a, model_b):
"""Check to see if model class A inherits from another model class B"""
if model_a.name == model_b.name:
return True
if not model_b.children:
return False
return any(_is_model_descendant(model_a, b) for b in model_b.children)
def _render_index(index):
columns = [repr(col.name) for col in index.columns]
return _flask_prepend + 'Index({0!r}, {1})'.format(index.name, ', '.join(columns))
class ImportCollector(OrderedDict):
def add_import(self, obj):
type_ = type(obj) if not isinstance(obj, type) else obj
pkgname = 'sqlalchemy' if type_.__name__ in sqlalchemy.__all__ else type_.__module__ # @UndefinedVariable
self.add_literal_import(pkgname, type_.__name__)
def add_literal_import(self, pkgname, name):
names = self.setdefault(pkgname, set())
names.add(name)
def render(self):
return '\n'.join('from {0} import {1}'.format(package, ', '.join(sorted(names)))
for package, names in self.items())
class Model(object):
def __init__(self, table):
super(Model, self).__init__()
self.table = table
self.schema = table.schema
# Adapt column types to the most reasonable generic types (ie. VARCHAR -> String)
for column in table.columns:
cls = column.type.__class__
for supercls in cls.__mro__:
if hasattr(supercls, '__visit_name__'):
cls = supercls
if supercls.__name__ != supercls.__name__.upper() and not supercls.__name__.startswith('_'):
break
column.type = column.type.adapt(cls)
def add_imports(self, collector):
if self.table.columns:
collector.add_import(Column)
for column in self.table.columns:
collector.add_import(column.type)
if column.server_default:
collector.add_literal_import('sqlalchemy.schema', 'FetchedValue')
for constraint in sorted(self.table.constraints, key=_get_constraint_sort_key):
if isinstance(constraint, ForeignKeyConstraint):
if len(constraint.columns) > 1:
collector.add_literal_import('sqlalchemy', 'ForeignKeyConstraint')
else:
collector.add_literal_import('sqlalchemy', 'ForeignKey')
elif isinstance(constraint, UniqueConstraint):
if len(constraint.columns) > 1:
collector.add_literal_import('sqlalchemy', 'UniqueConstraint')
elif not isinstance(constraint, PrimaryKeyConstraint):
collector.add_import(constraint)
for index in self.table.indexes:
if len(index.columns) > 1:
collector.add_import(index)
class ModelTable(Model):
def add_imports(self, collector):
super(ModelTable, self).add_imports(collector)
collector.add_import(Table)
def render(self):
met = ' metadata,' if _flask_prepend == '' else ''
text = 't_{0} = {1}Table(\n {0!r},{2}\n'.format(self.table.name, _flask_prepend, met)
for column in self.table.columns:
text += ' {0},\n'.format(_render_column(column, True))
for constraint in sorted(self.table.constraints, key=_get_constraint_sort_key):
if isinstance(constraint, PrimaryKeyConstraint):
continue
if isinstance(constraint, (ForeignKeyConstraint, UniqueConstraint)) and len(constraint.columns) == 1:
continue
text += ' {0},\n'.format(_render_constraint(constraint))
for index in self.table.indexes:
if len(index.columns) > 1:
text += ' {0},\n'.format(_render_index(index))
if self.schema:
text += " schema='{0}',\n".format(self.schema)
return text.rstrip('\n,') + '\n)'
class ModelClass(Model):
parent_name = 'Base'
def __init__(self, table, association_tables, inflect_engine, detect_joined):
super(ModelClass, self).__init__(table)
self.name = self._tablename_to_classname(table.name, inflect_engine)
self.children = []
self.attributes = OrderedDict()
# Assign attribute names for columns
for column in table.columns:
self._add_attribute(column.name, column)
# Add many-to-one relationships
pk_column_names = set(col.name for col in table.primary_key.columns)
for constraint in sorted(table.constraints, key=_get_constraint_sort_key):
if isinstance(constraint, ForeignKeyConstraint):
target_cls = self._tablename_to_classname(constraint.elements[0].column.table.name, inflect_engine)
if (detect_joined and self.parent_name == 'Base' and
set(_get_column_names(constraint)) == pk_column_names):
self.parent_name = target_cls
else:
relationship_ = ManyToOneRelationship(self.name, target_cls, constraint, inflect_engine)
self._add_attribute(relationship_.preferred_name, relationship_)
# Add many-to-many relationships
for association_table in association_tables:
fk_constraints = [c for c in association_table.constraints if isinstance(c, ForeignKeyConstraint)]
fk_constraints.sort(key=_get_constraint_sort_key)
target_cls = self._tablename_to_classname(fk_constraints[1].elements[0].column.table.name, inflect_engine)
relationship_ = ManyToManyRelationship(self.name, target_cls, association_table, inflect_engine)
self._add_attribute(relationship_.preferred_name, relationship_)
@staticmethod
def _tablename_to_classname(tablename, inflect_engine):
camel_case_name = ''.join(part[:1].upper() + part[1:] for part in re.split(r'_|-', tablename))
return inflect_engine.singular_noun(camel_case_name) or camel_case_name
def _add_attribute(self, attrname, value):
attrname = tempname = _convert_to_valid_identifier(attrname)
counter = 1
while tempname in self.attributes:
tempname = attrname + str(counter)
counter += 1
self.attributes[tempname] = value
return tempname
def add_imports(self, collector):
super(ModelClass, self).add_imports(collector)
if any(isinstance(value, Relationship) for value in self.attributes.values()):
collector.add_literal_import('sqlalchemy.orm', 'relationship')
for child in self.children:
child.add_imports(collector)
def render(self):
text = 'class {0}({1}):\n'.format(self.name, self.parent_name)
text += ' __tablename__ = {0!r}\n'.format(self.table.name)
# Render constraints and indexes as __table_args__
table_args = []
for constraint in sorted(self.table.constraints, key=_get_constraint_sort_key):
if isinstance(constraint, PrimaryKeyConstraint):
continue
if isinstance(constraint, (ForeignKeyConstraint, UniqueConstraint)) and len(constraint.columns) == 1:
continue
table_args.append(_render_constraint(constraint))
for index in self.table.indexes:
if len(index.columns) > 1:
table_args.append(_render_index(index))
table_kwargs = {}
if self.schema:
table_kwargs['schema'] = self.schema
kwargs_items = ', '.join('{0!r}: {1!r}'.format(key, table_kwargs[key]) for key in table_kwargs)
kwargs_items = '{{{0}}}'.format(kwargs_items) if kwargs_items else None
if table_kwargs and not table_args:
text += ' __table_args__ = {0}\n'.format(kwargs_items)
elif table_args:
if kwargs_items:
table_args.append(kwargs_items)
if len(table_args) == 1:
table_args[0] += ','
text += ' __table_args__ = (\n {0}\n )\n'.format(',\n '.join(table_args))
# Render columns
text += '\n'
for attr, column in self.attributes.items():
if isinstance(column, Column):
show_name = attr != column.name
text += ' {0} = {1}\n'.format(attr, _render_column(column, show_name))
# Render relationships
if any(isinstance(value, Relationship) for value in self.attributes.values()):
text += '\n'
for attr, relationship in self.attributes.items():
if isinstance(relationship, Relationship):
text += ' {0} = {1}\n'.format(attr, relationship.render())
# Render subclasses
for child_class in self.children:
text += '\n\n' + child_class.render()
return text
class Relationship(object):
def __init__(self, source_cls, target_cls):
super(Relationship, self).__init__()
self.source_cls = source_cls
self.target_cls = target_cls
self.kwargs = OrderedDict()
self.backref_name = _underscore(self.source_cls)
def render(self):
text = _flask_prepend + 'relationship('
args = [repr(self.target_cls)]
if 'secondaryjoin' in self.kwargs:
text += '\n '
delimiter, end = ',\n ', '\n )'
else:
delimiter, end = ', ', ')'
args.extend([key + '=' + value for key, value in self.kwargs.items()])
return text + delimiter.join(args) + end
def make_backref(self, relationships, classes):
backref = self.backref_name
original_backref = backref
# Check if backref already exists for relationship source_cls to target_cls and add suffix
suffix = 0
while (self.target_cls, backref) in [(x.target_cls, x.backref_name) for x in relationships]:
backref = original_backref + str('_{0}'.format(suffix))
suffix += 1
self.kwargs['backref'] = repr(backref)
# Check if any of the target_cls inherit from other target_cls
# If so, modify backref name of descendant
# "backref({0}, lazy='dynamic')".format(repr(backref))
for rel in [x for x in relationships if 'backref' in x.kwargs]:
if self.target_cls in classes and rel.target_cls in classes:
if _is_model_descendant(classes[self.target_cls], classes[rel.target_cls]):
self.backref_name = self.target_cls.lower() + '_' + backref
self.kwargs['backref'] = repr(self.backref_name)
if _is_model_descendant(classes[rel.target_cls], classes[self.target_cls]):
backref = rel.backref_name
rel.backref_name = rel.target_cls.lower() + '_' + backref
rel.kwargs['backref'] = repr(rel.backref_name)
class ManyToOneRelationship(Relationship):
def __init__(self, source_cls, target_cls, constraint, inflect_engine):
super(ManyToOneRelationship, self).__init__(source_cls, target_cls)
column_names = _get_column_names(constraint)
colname = column_names[0]
tablename = constraint.elements[0].column.table.name
if not colname.endswith('_id'):
self.preferred_name = inflect_engine.singular_noun(tablename) or tablename
else:
self.preferred_name = colname[:-3]
self.backref_name = inflect_engine.plural_noun(self.backref_name)
# Add uselist=False to One-to-One relationships
if any(isinstance(c, (PrimaryKeyConstraint, UniqueConstraint)) and
set(col.name for col in c.columns) == set(column_names)
for c in constraint.table.constraints):
self.kwargs['uselist'] = 'False'
# Handle self referential relationships
if source_cls == target_cls:
self.preferred_name = 'parent' if not colname.endswith('_id') else colname[:-3]
pk_col_names = [col.name for col in constraint.table.primary_key]
self.kwargs['remote_side'] = '[{0}]'.format(', '.join(pk_col_names))
# If the two tables share more than one foreign key constraint,
# SQLAlchemy needs an explicit primaryjoin to figure out which column(s) to join with
# common_fk_constraints = _get_common_fk_constraints(constraint.table, constraint.elements[0].column.table)
# if len(common_fk_constraints) > 1:
# self.kwargs['primaryjoin'] = "'{0}.{1} == {2}.{3}'".format(source_cls, constraint.columns[0], target_cls, constraint.elements[0].column.name)
if len(constraint.elements) > 1: # or
self.kwargs['primaryjoin'] = "'and_({0})'".format(', '.join(['{0}.{1} == {2}.{3}'.format(source_cls, k.parent.name, target_cls, k.column.name)
for k in constraint.elements]))
else:
self.kwargs['primaryjoin'] = "'{0}.{1} == {2}.{3}'".format(source_cls, column_names[0], target_cls,
constraint.elements[0].column.name)
class ManyToManyRelationship(Relationship):
def __init__(self, source_cls, target_cls, assocation_table, inflect_engine):
super(ManyToManyRelationship, self).__init__(source_cls, target_cls)
prefix = assocation_table.schema + '.' if assocation_table.schema is not None else ''
self.kwargs['secondary'] = repr(prefix + assocation_table.name)
constraints = [c for c in assocation_table.constraints if isinstance(c, ForeignKeyConstraint)]
constraints.sort(key=_get_constraint_sort_key)
colname = _get_column_names(constraints[1])[0]
tablename = constraints[1].elements[0].column.table.name
self.preferred_name = tablename if not colname.endswith('_id') else colname[:-3] + 's'
self.backref_name = inflect_engine.plural_noun(self.backref_name)
# Handle self referential relationships
if source_cls == target_cls:
self.preferred_name = 'parents' if not colname.endswith('_id') else colname[:-3] + 's'
pri_pairs = zip(_get_column_names(constraints[0]), constraints[0].elements)
sec_pairs = zip(_get_column_names(constraints[1]), constraints[1].elements)
pri_joins = ['{0}.{1} == {2}.c.{3}'.format(source_cls, elem.column.name, assocation_table.name, col)
for col, elem in pri_pairs]
sec_joins = ['{0}.{1} == {2}.c.{3}'.format(target_cls, elem.column.name, assocation_table.name, col)
for col, elem in sec_pairs]
self.kwargs['primaryjoin'] = (
repr('and_({0})'.format(', '.join(pri_joins))) if len(pri_joins) > 1 else repr(pri_joins[0]))
self.kwargs['secondaryjoin'] = (
repr('and_({0})'.format(', '.join(sec_joins))) if len(sec_joins) > 1 else repr(sec_joins[0]))
class CodeGenerator(object):
header = '# coding: utf-8'
footer = ''
def __init__(self, metadata, noindexes=False, noconstraints=False,
nojoined=False, noinflect=False, nobackrefs=False,
flask=False, ignore_cols=None, noclasses=False, nocomments=False, notables=False):
super(CodeGenerator, self).__init__()
if noinflect:
inflect_engine = _DummyInflectEngine()
else:
import inflect
inflect_engine = inflect.engine()
# exclude these column names from consideration when generating association tables
_ignore_columns = ignore_cols or []
self.flask = flask
if not self.flask:
global _flask_prepend
_flask_prepend = ''
self.nocomments = nocomments
# Pick association tables from the metadata into their own set, don't process them normally
links = defaultdict(lambda: [])
association_tables = set()
for table in metadata.tables.values():
# Link tables have exactly two foreign key constraints and all columns are involved in them
# except for special columns like id, inserted, and updated
fk_constraints = [constr for constr in table.constraints if isinstance(constr, ForeignKeyConstraint)]
if len(fk_constraints) == 2 and all(col.foreign_keys for col in table.columns if col.name not in _ignore_columns):
association_tables.add(table.name)
tablename = sorted(fk_constraints, key=_get_constraint_sort_key)[0].elements[0].column.table.name
links[tablename].append(table)
# Iterate through the tables and create model classes when possible
self.models = []
self.collector = ImportCollector()
classes = {}
for table in sorted(metadata.tables.values(), key=lambda t: (t.schema or '', t.name)):
# Support for Alembic and sqlalchemy-migrate -- never expose the schema version tables
if table.name in ('alembic_version', 'migrate_version'):
continue
if noindexes:
table.indexes.clear()
if noconstraints:
table.constraints = set([table.primary_key])
table.foreign_keys.clear()
for col in table.columns:
col.foreign_keys.clear()
else:
# Detect check constraints for boolean and enum columns
for constraint in table.constraints.copy():
if isinstance(constraint, CheckConstraint):
sqltext = _get_compiled_expression(constraint.sqltext)
# Turn any integer-like column with a CheckConstraint like "column IN (0, 1)" into a Boolean
match = _re_boolean_check_constraint.match(sqltext)
if match:
colname = _re_column_name.match(match.group(1)).group(3)
table.constraints.remove(constraint)
table.c[colname].type = Boolean()
continue
# Turn any string-type column with a CheckConstraint like "column IN (...)" into an Enum
match = _re_enum_check_constraint.match(sqltext)
if match:
colname = _re_column_name.match(match.group(1)).group(3)
items = match.group(2)
if isinstance(table.c[colname].type, String):
table.constraints.remove(constraint)
if not isinstance(table.c[colname].type, Enum):
options = _re_enum_item.findall(items)
table.c[colname].type = Enum(*options, native_enum=False)
continue
# Only generate classes when notables is set to True
if notables:
model = ModelClass(table, links[table.name], inflect_engine, not nojoined)
classes[model.name] = model
elif not table.primary_key or table.name in association_tables or noclasses:
# Only form model classes for tables that have a primary key and are not association tables
model = ModelTable(table)
elif not noclasses:
model = ModelClass(table, links[table.name], inflect_engine, not nojoined)
classes[model.name] = model
self.models.append(model)
# collect imports for models only if flask is not specified
if not self.flask:
model.add_imports(self.collector)
# Nest inherited classes in their superclasses to ensure proper ordering
for model in classes.values():
if model.parent_name != 'Base':
classes[model.parent_name].children.append(model)
self.models.remove(model)
# If backrefs are allowed. Resolve any relationships conflicts where one
# target class might inherit from another
if not nobackrefs:
for model in classes.values():
visited = []
for relationship in model.attributes.values():
if isinstance(relationship, Relationship):
relationship.make_backref(visited, classes)
visited.append(relationship)
if self.flask:
# Add Flask-SQLAlchemy support
self.collector.add_literal_import('flask_sqlalchemy', 'SQLAlchemy')
parent_name = 'db.Model'
for model in classes.values():
if model.parent_name == 'Base':
model.parent_name = parent_name
else:
self.collector.add_literal_import('sqlalchemy.ext.declarative', 'declarative_base')
self.collector.add_literal_import('sqlalchemy', 'MetaData')
def render(self, outfile=sys.stdout):
print(self.header, file=outfile)
# Render the collected imports
print(self.collector.render() + '\n\n', file=outfile)
if self.flask:
print('db = SQLAlchemy()', file=outfile)
else:
if any(isinstance(model, ModelClass) for model in self.models):
print('Base = declarative_base()\nmetadata = Base.metadata', file=outfile)
else:
print('metadata = MetaData()', file=outfile)
# Render the model tables and classes
for model in self.models:
print('\n\n', file=outfile)
print(model.render().rstrip('\n'), file=outfile)
if self.footer:
print(self.footer, file=outfile)
|
DQMOffline/JetMET/test/run_PromptAna_GR108290_Calo.py | ckamtsikis/cmssw | 852 | 11138482 | #
import FWCore.ParameterSet.Config as cms
process = cms.Process("test")
process.load("CondCore.DBCommon.CondDBSetup_cfi")
#
# DQM
#
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.MEtoEDMConverter_cfi")
# HCALNoise module
process.load("RecoMET.METProducers.hcalnoiseinfoproducer_cfi")
process.hcalnoise.refillRefVectors = cms.bool(True)
process.hcalnoise.hcalNoiseRBXCollName = "hcalnoise"
process.hcalnoise.requirePedestals = cms.bool(False)
# the task - JetMET objects
process.load("DQMOffline.JetMET.jetMETDQMOfflineSourceCosmic_cff")
process.jetMETAnalyzer.OutputMEsInRootFile = cms.bool(True)
#process.jetMETAnalyzer.OutputFileName = cms.string('jetMETMonitoring_GR108290_Calo.root')
process.jetMETAnalyzer.OutputFileName = cms.string('jetMETMonitoring_TTbar.root')
process.jetMETAnalyzer.DoJetPtAnalysis = cms.untracked.bool(True)
process.jetMETAnalyzer.caloMETAnalysis.allSelection = cms.bool(True)
process.jetMETAnalyzer.caloMETNoHFAnalysis.allSelection = cms.bool(True)
process.jetMETAnalyzer.caloMETHOAnalysis.allSelection = cms.bool(True)
process.jetMETAnalyzer.caloMETNoHFHOAnalysis.allSelection = cms.bool(True)
process.jetMETAnalyzer.caloMETAnalysis.verbose = cms.int32(0)
# the task - JetMET trigger
process.load("DQMOffline.Trigger.JetMETHLTOfflineSource_cfi")
# check # of bins
process.load("DQMServices.Components.DQMStoreStats_cfi")
# for igprof
#process.IgProfService = cms.Service("IgProfService",
# reportFirstEvent = cms.untracked.int32(0),
# reportEventInterval = cms.untracked.int32(25),
# reportToFileAtPostEvent = cms.untracked.string("| gzip -c > igdqm.%I.gz")
#)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
# 'file:/home/xv/sordini/QCD_Pt170_summer09_31X_V3-v1_GEN-SIM-RECO.root'
'/store/relval/CMSSW_3_4_0_pre1/RelValTTbar/GEN-SIM-RECO/MC_31X_V9-v1/0008/2C8CD8FE-B6B5-DE11-ACB8-001D09F2905B.root'
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/FC70C563-1678-DE11-92EC-000423D999CA.root'
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/F8AF03F6-1978-DE11-829B-001D09F252F3.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/F83D885F-1678-DE11-BAC1-000423D951D4.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/F26AA2D6-1278-DE11-B9C5-000423D98FBC.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/E6B19C9E-1A78-DE11-B0DC-000423D98EC8.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/E417AC6B-1678-DE11-91E2-000423D986A8.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/E0C2C65E-1678-DE11-B755-000423D98BC4.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/DE34EE8D-1878-DE11-A20C-001D09F24024.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/C0115BD1-1778-DE11-9D63-001D09F24DDA.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/B61073CE-1778-DE11-BAC0-001D09F244BB.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/B4A66260-1678-DE11-AF5A-000423D944F8.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/AC18D41F-1778-DE11-898F-000423D94494.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/A8842AFA-1478-DE11-9C71-000423D991D4.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/9EC2FB1F-1778-DE11-90D3-001D09F34488.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/948CEE47-1478-DE11-8610-000423D951D4.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/9243415C-1B78-DE11-8650-001D09F24DDA.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/808C5C48-1478-DE11-BE29-000423D944F0.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/78FC8993-1F78-DE11-B79F-000423D99160.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/76A40786-1878-DE11-A8BB-001D09F23E53.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/76A299CE-1778-DE11-8CBE-000423D98E54.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/709AB95E-1678-DE11-AF06-000423D98E6C.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/689F8D60-1678-DE11-B981-000423D99896.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/68457387-1878-DE11-BD30-001D09F2503C.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/64E9C35C-1B78-DE11-ADC6-001D09F24D8A.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/60E22226-1778-DE11-A958-001D09F2983F.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/5CF42529-1778-DE11-B52D-0019B9F72D71.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/5C7C76AF-2178-DE11-B49A-000423D998BA.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/586703D2-1778-DE11-8DDC-001D09F24399.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/52776364-1678-DE11-B9EA-000423D990CC.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/4C4D1F60-1678-DE11-A9BD-000423D99658.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/4A5576CF-1778-DE11-AF6A-001D09F25438.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/40CF75CE-1778-DE11-95E1-001D09F24DA8.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/3E56DB20-1778-DE11-9041-001D09F2B2CF.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/363856D0-1778-DE11-8E60-000423D991F0.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/2AF0D062-2278-DE11-A9EF-001D09F2437B.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/2AD949CF-1778-DE11-AB52-001D09F29114.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/1C584562-2278-DE11-9835-001D09F2538E.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/1C55F321-1778-DE11-A1CF-001D09F24EAC.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/0E943F48-1478-DE11-849B-000423D98E6C.root',
# '/store/data/Commissioning09/Calo/RECO/v6/000/108/290/0AFFD0CE-1778-DE11-87A6-001D09F2AF96.root',
# '/store/data/Commissioning09/Calo/RECO/v3/000/100/945/FA72B935-0960-DE11-A902-000423D98DB4.root'
)
)
process.source.inputCommands = cms.untracked.vstring('keep *', 'drop *_MEtoEDMConverter_*_*')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.Timing = cms.Service("Timing")
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring('jetMETAnalyzer'),
cout = cms.untracked.PSet(
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
jetMETAnalyzer = cms.untracked.PSet(
limit = cms.untracked.int32(100)
),
noLineBreaks = cms.untracked.bool(True),
DEBUG = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
#FwkJob = cms.untracked.PSet(
# limit = cms.untracked.int32(0)
#),
threshold = cms.untracked.string('DEBUG')
),
categories = cms.untracked.vstring('jetMETAnalyzer'),
destinations = cms.untracked.vstring('cout')
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.FEVT = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('keep *_MEtoEDMConverter_*_*'),
#outputCommands = cms.untracked.vstring('keep *'),
fileName = cms.untracked.string('reco_DQM_GR108290_Calo.root')
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True) ## default is false
)
process.p = cms.Path(process.hcalnoise
* process.jetMETHLTOfflineSource
* process.jetMETDQMOfflineSourceCosmic
* process.MEtoEDMConverter
* process.dqmStoreStats)
process.outpath = cms.EndPath(process.FEVT)
process.DQM.collectorHost = ''
|
ad_examples/aad/aad_stream.py | matwey/ad_examples | 773 | 11138488 | import os
import logging
import numpy as np
from ..common.utils import (
logger, InstanceList, Timer, append_instance_lists, cbind, rbind, append, matrix, read_data_as_matrix,
get_sample_feature_ranges, configure_logger
)
from ..common.metrics import fn_auc
from .aad_globals import (
STREAM_RETENTION_OVERWRITE, STREAM_RETENTION_TOP_ANOMALOUS, get_aad_command_args, AadOpts,
get_first_vals_not_marked
)
from .aad_base import get_budget_topK, Ensemble
from .forest_aad_detector import is_forest_detector
from .query_model import Query
from .aad_support import get_aad_model, load_aad_model, SequentialResults, write_sequential_results_to_csv
from .data_stream import DataStream, IdServer
from .aad_test_support import plot_score_contours
from .query_model_euclidean import filter_by_euclidean_distance
class StreamingAnomalyDetector(object):
"""
Attributes:
model: Aad
trained AAD model
stream: DataStream
max_buffer: int
Determines the window size
labeled: InstanceList
unlabeled: InstanceList
buffer: InstanceList
test set from stream
initial_labeled: InstanceList
initial_anomalies: InstanceList
initial_nominals: InstanceList
opts: AadOpts
"""
def __init__(self, stream, model, labeled_x=None, labeled_y=None, labeled_ids=None,
unlabeled_x=None, unlabeled_y=None, unlabeled_ids=None, opts=None,
max_buffer=512, min_samples_for_update=256):
self.model = model
self.stream = stream
self.max_buffer = max_buffer
self.min_samples_for_update = min_samples_for_update
self.opts = opts
self.n_prelabeled_instances = 0
self.buffer = None
self.initial_labeled, self.initial_anomalies, self.initial_nominals = \
self.get_initial_labeled(labeled_x, labeled_y, labeled_ids)
self.labeled = self._get_pretrain_labeled()
if self.labeled is not None:
self.n_prelabeled_instances = self.labeled.x.shape[0]
self.unlabeled = None
if unlabeled_x is not None:
self.unlabeled = InstanceList(x=unlabeled_x, y=unlabeled_y, ids=unlabeled_ids)
# transform the features and cache...
self.unlabeled.x_transformed = self.get_transformed(self.unlabeled.x)
self.qstate = None
self.feature_ranges = None # required if diverse querying strategy is used
self.current_dists = None
self.kl_alpha = opts.kl_alpha
self.kl_q_alpha = 0.
if is_forest_detector(self.opts.detector_type):
# initialize the baseline instance distributions required for evaluating KL-divergence
all_instances = self._get_all_instances()
self.current_dists = self.model.get_node_sample_distributions(all_instances.x)
kl_trees, self.kl_q_alpha = self.model.get_KL_divergence_distribution(all_instances.x, alpha=self.kl_alpha)
logger.debug("kl kl_q_alpha: %s (alpha=%0.2f), kl mean: %f, kl_trees:\n%s" %
(str(list(self.kl_q_alpha)), self.kl_alpha, np.mean(kl_trees), str(list(kl_trees))))
self._pre_train(debug_auc=True)
def _pre_train(self, debug_auc=False):
if not self.opts.pretrain or self.initial_labeled is None or self.opts.n_pretrain == 0:
return
ha = np.where(self.initial_labeled.y == 1)[0]
# hn = np.where(self.initial_labeled.y == 0)[0]
# set hn to empty array for pre-training. Since all instances are labeled,
# we just focus on getting the labeled anomalies ranked at the top
hn = np.zeros(0, dtype=int)
if len(ha) == 0 or len(ha) == len(self.initial_labeled.y):
logger.debug("At least one example from each class (anomaly, nominal) is required for pretraining.")
return
logger.debug("Pre-training %d rounds with anomalies: %d, nominals: %d..." %
(self.opts.n_pretrain, len(ha), len(self.initial_labeled.y)-len(ha)))
tm = Timer()
x, y, ids, x_transformed = self.initial_labeled.x, self.initial_labeled.y, self.initial_labeled.ids, self.initial_labeled.x_transformed
orig_tau = self.opts.tau
self.opts.tau = len(ha)*1.0 / len(self.initial_labeled.y)
auc = self.get_auc(x=x, y=y, x_transformed=x_transformed)
if self.opts.dataset in ['toy', 'toy2', 'toy_hard']:
plot_score_contours(x, y, x_transformed, model=self.model,
filename="baseline", outputdir=self.opts.resultsdir,
opts=self.opts)
if debug_auc: logger.debug("AUC[0]: %f" % (auc))
best_i = 0
best_auc = auc
best_w = self.model.w
for i in range(self.opts.n_pretrain):
self.model.update_weights(x_transformed, y, ha, hn, self.opts)
auc = self.get_auc(x=x, y=y, x_transformed=x_transformed)
if debug_auc: logger.debug("AUC[%d]: %f" % (i + 1, auc))
if best_auc < auc:
best_auc = auc
best_w = np.copy(self.model.w)
best_i = i+1
logger.debug("best_i: %d, best_auc: %f" % (best_i, best_auc))
self.model.w = best_w
self.opts.tau = orig_tau
if self.opts.dataset in ['toy', 'toy2', 'toy_hard']:
# some DEBUG plots
selx = None
if self.labeled is not None:
idxs = np.where(self.labeled.y == 0)[0]
logger.debug("#selx: %d" % len(idxs))
selx = self.labeled.x[idxs]
plot_score_contours(x, y, x_transformed, selected_x=selx, model=self.model,
filename="pre_train", outputdir=self.opts.resultsdir,
opts=self.opts)
logger.debug(tm.message("Updated weights %d times with no feedback " % self.opts.n_pretrain))
def get_initial_labeled(self, x, y, ids):
"""Returns the labeled instances as InstanceLists
:param x: np.ndarray
:param y: np.array
:param ids: np.array
:return: InstanceList, InstanceList, InstanceList
"""
initial_labeled = initial_anomalies = initial_nominals = None
if x is not None:
initial_labeled = InstanceList(x=x, y=y, ids=ids)
# transform the features and cache...
initial_labeled.x_transformed = self.get_transformed(initial_labeled.x)
initial_anomalies, initial_nominals = self._separate_anomaly_nominal(initial_labeled)
return initial_labeled, initial_anomalies, initial_nominals
def _get_pretrain_labeled(self):
"""Returns a subset of the initial labeled data which will be utilized in future
First, we retain all labeled anomalies since these provide vital information.
Retaining all nominals might result in severe class imbalance if they are in
relatively larger number compared to anomalies. Therefore, we subsample the nominals.
We need to determine a reasonable informative set of nominals. For this, we utilize
Euclidean-diversity based strategy. We retain the nominals which have highest
average distance from the anomalies as well as other selected nominals.
:return: InstanceList
"""
l = self.initial_labeled
if l is None:
return None
if self.opts.n_pretrain_nominals < 0:
# include all nominal instances
labeled = InstanceList(x=self.initial_labeled.x, y=self.initial_labeled.y,
ids=self.initial_labeled.ids,
x_transformed=self.initial_labeled.x_transformed)
elif self.opts.n_pretrain_nominals == 0:
# completely ignore nominals and only retain anomalies
labeled = InstanceList(x=self.initial_anomalies.x, y=self.initial_anomalies.y,
ids=self.initial_anomalies.ids,
x_transformed=self.initial_anomalies.x_transformed)
else:
# select a subset of nominals
tm = Timer()
anom_idxs = np.where(l.y == 1)[0]
noml_idxs = np.where(l.y == 0)[0]
# set number of nominals...
n_nominals = min(self.opts.n_pretrain_nominals, len(anom_idxs))
if n_nominals > 0:
selected_indexes = filter_by_euclidean_distance(l.x,
noml_idxs, init_selected=anom_idxs,
n_select=n_nominals)
else:
selected_indexes = anom_idxs
selected_indexes = np.array(selected_indexes, dtype=int)
labeled = InstanceList(x=l.x[selected_indexes], y=l.y[selected_indexes],
x_transformed=l.x_transformed[selected_indexes],
ids=l.ids[selected_indexes])
logger.debug(tm.message("Total labeled: %d, anomalies: %d, nominals: %d" %
(labeled.x.shape[0], len(anom_idxs), len(selected_indexes)-len(anom_idxs))))
return labeled
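# Illustrative sketch of the diversity-based subsampling described above. The actual
# selection is done by the external helper filter_by_euclidean_distance(); the greedy
# idea is roughly (commented pseudocode, not executed):
#
#   selected = list(anom_idxs)                  # always keep every labeled anomaly
#   candidates = set(noml_idxs)
#   while len(selected) - len(anom_idxs) < n_nominals and candidates:
#       # pick the nominal with the largest average distance to everything selected so far
#       best = max(candidates,
#                  key=lambda j: np.mean(np.linalg.norm(l.x[selected] - l.x[j], axis=1)))
#       selected.append(best)
#       candidates.remove(best)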
def _separate_anomaly_nominal(self, labeled):
anom_idxs = np.where(labeled.y == 1)[0]
noml_idxs = np.where(labeled.y == 0)[0]
anomalies = None
nominals = None
if len(anom_idxs) > 0:
anomalies = InstanceList(x=labeled.x[anom_idxs], y=labeled.y[anom_idxs], ids=labeled.ids[anom_idxs],
x_transformed=labeled.x_transformed[anom_idxs])
if len(noml_idxs) > 0:
nominals = InstanceList(x=labeled.x[noml_idxs], y=labeled.y[noml_idxs], ids=labeled.ids[noml_idxs],
x_transformed=labeled.x_transformed[noml_idxs])
return anomalies, nominals
def _get_all_instances(self):
if self.labeled is not None and self.unlabeled is not None:
all_instances = append_instance_lists(self.labeled, self.unlabeled)
elif self.labeled is not None:
all_instances = self.labeled
else:
all_instances = self.unlabeled
return all_instances
def reset_buffer(self):
self.buffer = None
def add_to_buffer(self, instances):
if self.buffer is not None:
self.buffer.add_instances(instances.x, instances.y,
instances.ids, instances.x_transformed)
else:
self.buffer = instances
def move_buffer_to_unlabeled(self):
if self.opts.retention_type == STREAM_RETENTION_OVERWRITE:
if False:
missed = int(np.sum(self.unlabeled.y)) if self.unlabeled.y is not None else 0
retained = int(np.sum(self.buffer.y)) if self.buffer.y is not None else 0
logger.debug("[overwriting] true anomalies: missed(%d), retained(%d)" % (missed, retained))
if self.buffer is not None:
self.unlabeled = self.buffer
elif self.opts.retention_type == STREAM_RETENTION_TOP_ANOMALOUS:
# retain the top anomalous instances from the merged
# set of instance from both buffer and current unlabeled.
if self.buffer is not None and self.unlabeled is not None:
tmp = append_instance_lists(self.unlabeled, self.buffer)
elif self.buffer is not None:
tmp = self.buffer
else:
tmp = self.unlabeled
n = min(tmp.x.shape[0], self.max_buffer)
idxs, scores = self.model.order_by_score(tmp.x_transformed)
top_idxs = idxs[np.arange(n)]
tmp_x, tmp_y, tmp_ids, tmp_trans = tmp.get_instances_at(top_idxs)
self.unlabeled = InstanceList(x=tmp_x, y=tmp_y, ids=tmp_ids, x_transformed=tmp_trans)
# self.unlabeled = InstanceList(x=tmp.x[top_idxs],
# y=tmp.y[top_idxs],
# x_transformed=tmp.x_transformed[top_idxs])
if n < len(tmp.y):
missedidxs = idxs[n:len(tmp.y)]
else:
missedidxs = None
if False:
missed = int(np.sum(tmp.y[missedidxs])) if missedidxs is not None else 0
retained = int(np.sum(self.unlabeled.y)) if self.unlabeled.y is not None else 0
logger.debug("[top anomalous] true anomalies: missed(%d), retained(%d)" % (missed, retained))
self.feature_ranges = get_sample_feature_ranges(self.unlabeled.x)
self.reset_buffer()
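# Illustrative contrast of the two retention modes handled above (assumed numbers):
# with max_buffer=512, a current unlabeled window of 512 instances and a full buffer
# of 512 new instances,
#   - STREAM_RETENTION_OVERWRITE simply keeps the 512 new buffer instances;
#   - STREAM_RETENTION_TOP_ANOMALOUS merges all 1024 instances, scores them with the
#     current model and retains the 512 highest-scoring ones, so older but still
#     anomalous-looking instances can survive into the next feedback round.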
def get_num_instances(self):
"""Returns the total number of labeled and unlabeled instances that will be used for weight inference"""
n = 0
if self.unlabeled is not None:
n += len(self.unlabeled)
if self.labeled is not None:
# logger.debug("labeled_x: %s" % str(self.labeled_x.shape))
n += len(self.labeled)
return n
def init_query_state(self):
n = self.get_num_instances()
bt = get_budget_topK(n, self.opts)
self.qstate = Query.get_initial_query_state(self.opts.qtype, opts=self.opts, qrank=bt.topK,
a=1., b=1., budget=bt.budget)
def get_next_from_stream(self, n=0, transform=False):
if n == 0:
n = self.max_buffer
instances = self.stream.read_next_from_stream(n)
if instances is not None:
if False:
if self.buffer is not None:
logger.debug("buffer shape: %s" % str(self.buffer.x.shape))
logger.debug("x.shape: %s" % str(instances.x.shape))
if transform:
instances.x_transformed = self.get_transformed(instances.x)
self.add_to_buffer(instances)
self.model.add_samples(instances.x, current=False)
return instances
def update_model_from_buffer(self, transform=False):
"""Updates the underlying model if it meets the criteria
The minimum number of samples required for model update is:
max(self.min_samples_for_update, self.opts.stream_window//2)
We will replace trees in the following conditions:
- if check_KL_divergence is True, then check whether the KL-divergence from the
  reference distributions exceeds the alpha-threshold for at least a 2*kl_alpha
  fraction of the trees; if so, then replace all trees which exceed their
  respective thresholds.
- if check_KL_divergence is False, then replace the configured fraction of
oldest trees. The fraction is configured with the command line
parameter --forest_replace_frac.
:param transform: bool
:return:
"""
model_updated = False
min_samples_required = max(self.min_samples_for_update, self.opts.stream_window//2)
if self.buffer is None or self.buffer.x is None or self.buffer.x.shape[0] < min_samples_required:
logger.warning("Insufficient samples (%d) for model update. Minimum required: %d = max(%d,%d)." %
(0 if self.buffer is None or self.buffer.x is None else self.buffer.x.shape[0],
min_samples_required, self.min_samples_for_update, self.opts.stream_window//2))
else:
tm = Timer()
n_trees = self.model.clf.n_estimators
n_threshold = int(2 * self.kl_alpha * n_trees)
replace_trees_by_kl = None
if self.opts.check_KL_divergence:
kl_trees, _ = self.model.get_KL_divergence_distribution(self.buffer.x, p=self.current_dists)
replace_trees_by_kl = self.model.get_trees_to_replace(kl_trees, self.kl_q_alpha)
logger.debug("kl kl_q_alpha: %s (alpha=%0.2f), kl_trees:\n%s\n(#replace: %d): %s" %
(str(list(self.kl_q_alpha)), self.kl_alpha, str(list(kl_trees)), len(replace_trees_by_kl), str(list(replace_trees_by_kl))))
n_replace = 0 if replace_trees_by_kl is None else len(replace_trees_by_kl)
# check whether conditions for tree-replacement are satisfied.
do_replace = not self.opts.check_KL_divergence or (n_trees > 0 and n_replace >= n_threshold)
if do_replace:
self.model.update_model_from_stream_buffer(replace_trees=replace_trees_by_kl)
if is_forest_detector(self.opts.detector_type):
self.current_dists = self.model.get_node_sample_distributions(self.buffer.x)
kl_trees, self.kl_q_alpha = self.model.get_KL_divergence_distribution(self.buffer.x, alpha=self.kl_alpha)
logger.debug("kl kl_q_alpha: %s, kl_trees:\n%s" % (str(list(self.kl_q_alpha)), str(list(kl_trees))))
model_updated = True
logger.debug(tm.message(
"Model%s updated; n_replace: %d, n_threshold: %d, kl_q_alpha: %s (check_KL: %s, alpha: %0.2f)" %
(" not" if not do_replace else "", n_replace, n_threshold,
str(list(self.kl_q_alpha)), str(self.opts.check_KL_divergence), self.kl_alpha)
))
if transform:
if self.labeled is not None and self.labeled.x is not None:
self.labeled.x_transformed = self.get_transformed(self.labeled.x)
if self.unlabeled is not None and self.unlabeled.x is not None:
self.unlabeled.x_transformed = self.get_transformed(self.unlabeled.x)
if self.buffer is not None and self.buffer.x is not None:
self.buffer.x_transformed = self.get_transformed(self.buffer.x)
return model_updated
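# Illustrative example of the replacement decision above (assumed numbers): with
# n_trees=100 and kl_alpha=0.05, n_threshold = int(2*0.05*100) = 10. If KL-divergence
# checking is enabled and, say, 13 trees exceed their KL thresholds (n_replace=13 >= 10),
# those 13 trees are replaced and the reference distributions are recomputed from the
# buffer; if only 7 trees exceed their thresholds, the forest is left unchanged for
# this window.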
def stream_buffer_empty(self):
return self.stream.empty()
def get_anomaly_scores(self, x, x_transformed=None):
if x_transformed is None:
x_new = self.get_transformed(x)
else:
if x.shape[0] != x_transformed.shape[0]:
raise ValueError("x(%d) and x_transformed(%d) are inconsistent" % (x.shape[0], x_transformed.shape[0]))
x_new = x_transformed
scores = self.model.get_score(x_new)
return scores
def get_auc(self, x, y, x_transformed=None):
scores = self.get_anomaly_scores(x, x_transformed=x_transformed)
auc = fn_auc(cbind(y, -scores))
return auc
def get_allowed_labeled_subset(self):
""" Returns a randomly selected subset of labeled instances
The number of instances returned is determined by the upper limit
specified through the optional parameters opts.labeled_to_window_ratio
and opts.max_labeled_for_stream in the streaming mode.
"""
# first, compute the maximum number of labeled instances allowed for
# computing AAD losses and constraints...
n_labeled = 0 if self.labeled is None else len(self.labeled.x)
if n_labeled == 0 or (self.opts.labeled_to_window_ratio is None and self.opts.max_labeled_for_stream is None):
return self.labeled
n_allowed_labeled = self.max_buffer if self.opts.labeled_to_window_ratio is None \
else int(self.opts.labeled_to_window_ratio * self.max_buffer)
n_allowed_labeled = n_allowed_labeled if self.opts.max_labeled_for_stream is None \
else min(n_allowed_labeled, self.opts.max_labeled_for_stream)
n_allowed_labeled = min(n_allowed_labeled, n_labeled)
if n_allowed_labeled == n_labeled:
return self.labeled
labeled = InstanceList(x=self.labeled.x, y=self.labeled.y,
ids=self.labeled.ids, x_transformed=self.labeled.x_transformed)
n_per_type = n_allowed_labeled // 2
anom_idxs = np.where(self.labeled.y == 1)[0]
noml_idxs = np.where(self.labeled.y == 0)[0]
if len(anom_idxs) > n_per_type:
np.random.shuffle(anom_idxs)
idxs = anom_idxs[0:n_per_type]
else:
idxs = anom_idxs
n_anoms = len(idxs)
n_nomls = n_allowed_labeled - n_anoms
if len(noml_idxs) > n_nomls:
np.random.shuffle(noml_idxs)
idxs = np.append(idxs, noml_idxs[0:n_nomls])
else:
idxs = np.append(idxs, noml_idxs)
n_nomls = len(idxs) - n_anoms
if False:
logger.debug("n_labeled: %d, n_allowed_labeled: %d, n_anoms: %d, n_nomls: %d" %
(n_labeled, n_allowed_labeled, n_anoms, n_nomls))
mask = np.zeros(n_labeled, dtype=bool)
mask[idxs[0:n_allowed_labeled]] = True
labeled.retain_with_mask(mask)
return labeled
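# Worked example for the limits computed above (assumed settings): with max_buffer=512
# and labeled_to_window_ratio=0.2, n_allowed_labeled = int(0.2*512) = 102, so up to
# n_per_type = 51 labeled anomalies are sampled at random and the remaining budget
# (102 minus the anomalies actually kept) is filled with randomly sampled nominals;
# if fewer than 51 labeled anomalies exist, all of them are kept.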
def setup_data_for_feedback(self):
"""
Prepares the input matrices/data structures for the weight update. The format
is such that the top rows of the data matrix are the labeled instances and the rows below them are unlabeled.
:return: (InstanceList, np.array, np.array)
    (instances, ha, hn)
    instances - InstanceList combining labeled (top rows) and unlabeled instances,
    ha - indexes of labeled anomalies, hn - indexes of labeled nominals
"""
labeled = self.get_allowed_labeled_subset()
if labeled is None:
tmp = self.unlabeled
elif self.unlabeled is None:
tmp = labeled
else:
tmp = append_instance_lists(labeled, self.unlabeled)
if labeled is not None:
ha = np.where(labeled.y == 1)[0]
hn = np.where(labeled.y == 0)[0]
else:
ha = np.zeros(0, dtype=int)
hn = np.zeros(0, dtype=int)
if False:
logger.debug("x: %d, ha: %d, hn:%d" % (nrow(tmp.x), len(ha), len(hn)))
return tmp, ha, hn
def get_instance_stats(self):
nha = nhn = nul = 0
if self.labeled is not None and self.labeled.y is not None:
nha = len(np.where(self.labeled.y == 1)[0])
nhn = len(np.where(self.labeled.y == 0)[0])
if self.unlabeled is not None:
nul = len(self.unlabeled)
return nha, nhn, nul
def get_num_labeled(self):
"""Returns the number of instances for which we already have label feedback"""
if self.labeled is not None:
return len(self.labeled.y)
return 0
def reestimate_tau(self, default_tau):
"""Re-estimate the proportion of anomalies
The number of true anomalies discovered might end up being high
relative to the data in the memory. We need to adjust for that...
:param default_tau: float
default proportion of anomalies
:return: float
"""
new_tau = default_tau
nha, nhn, nul = self.get_instance_stats()
frac_known_anom = nha * 1.0 / (nha + nhn + nul)
if frac_known_anom >= default_tau:
new_tau = frac_known_anom + 0.01
logger.debug("Exceeded original tau (%f); setting tau=%f" % (default_tau, new_tau))
return new_tau
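# Worked example (assumed numbers): with 15 labeled anomalies, 25 labeled nominals and
# 460 unlabeled instances in memory, frac_known_anom = 15/500 = 0.03; if the configured
# tau is 0.03 or lower, tau is temporarily raised to 0.03 + 0.01 = 0.04 for the
# subsequent weight updates.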
def update_weights_with_no_feedback(self, n_train=None, debug_auc=False):
"""Runs the weight update n times
This is used when:
1. There has been a significant update to the model because
of (say) data drift and we want to iteratively estimate the
ensemble weights and the tau-quantile value a number of times.
2. We have an initial fully labeled set with which we want to
pretrain the model
"""
n = n_train if n_train is not None else self.opts.n_weight_updates_after_stream_window
if self.opts.do_not_update_weights or n <= 0:
return
tm = Timer()
tmp, ha, hn = self.setup_data_for_feedback()
x, y, ids, x_transformed = tmp.x, tmp.y, tmp.ids, tmp.x_transformed
orig_tau = self.opts.tau
self.opts.tau = self.reestimate_tau(orig_tau)
if debug_auc: logger.debug("AUC[0]: %f" % (self.get_auc(x=x, y=y, x_transformed=x_transformed)))
for i in range(n):
self.model.update_weights(x_transformed, y, ha, hn, self.opts)
if debug_auc: logger.debug("AUC[%d]: %f" % (i+1, self.get_auc(x=x, y=y, x_transformed=x_transformed)))
self.opts.tau = orig_tau
logger.debug(tm.message("Updated weights %d times with no feedback " % n))
def get_query_data(self, x=None, y=None, ids=None, ha=None, hn=None, unl=None, w=None, n_query=1):
"""Returns the best instance that should be queried, along with other data structures
Args:
x: np.ndarray
input instances (labeled + unlabeled)
y: np.array
labels for instances which are already labeled, else some dummy values
ids: np.array
unique instance ids
ha: np.array
indexes of labeled anomalies
hn: np.array
indexes of labeled nominals
unl: np.array
unlabeled instances that should be ignored for query
w: np.array
current weight vector
n_query: int
number of instances to query
"""
if self.get_num_instances() == 0:
raise ValueError("No instances available")
x_transformed = None
if x is None:
tmp, ha, hn = self.setup_data_for_feedback()
x, y, ids, x_transformed = tmp.x, tmp.y, tmp.ids, tmp.x_transformed
n = x.shape[0]
if w is None:
w = self.model.w
if unl is None:
unl = np.zeros(0, dtype=int)
n_feedback = len(ha) + len(hn)
# the top n_feedback instances in the instance list are the labeled items
queried_items = append(np.arange(n_feedback), unl)
if x_transformed is None:
x_transformed = self.get_transformed(x)
logger.debug("needs transformation")
order_anom_idxs, anom_score = self.model.order_by_score(x_transformed)
ensemble = Ensemble(x, original_indexes=0)
xi = self.qstate.get_next_query(maxpos=n, ordered_indexes=order_anom_idxs,
queried_items=queried_items,
ensemble=ensemble,
feature_ranges=self.feature_ranges,
model=self.model,
x=x_transformed, lbls=y, anom_score=anom_score,
w=w, hf=append(ha, hn),
remaining_budget=self.opts.num_query_batch, # self.opts.budget - n_feedback,
n=n_query)
if False:
logger.debug("ordered instances[%d]: %s\nha: %s\nhn: %s\nxi: %s" %
(self.opts.budget, str(list(order_anom_idxs[0:self.opts.budget])),
str(list(ha)), str(list(hn)), str(list(xi))))
return xi, x, y, ids, x_transformed, ha, hn, order_anom_idxs, anom_score
def get_transformed(self, x):
"""Returns the instance.x_transformed
Args:
instances: InstanceList
Returns: scipy sparse array
"""
# logger.debug("transforming data...")
x_transformed = self.model.transform_to_ensemble_features(
x, dense=False, norm_unit=self.opts.norm_unit)
return x_transformed
def move_unlabeled_to_labeled(self, xi, yi):
unlabeled_idxs = xi
x, _, id, x_trans = self.unlabeled.get_instances_at(unlabeled_idxs)
if self.labeled is None:
self.labeled = InstanceList(x=self.unlabeled.x[unlabeled_idxs, :],
y=yi,
ids=None if id is None else id,
x_transformed=x_trans)
else:
self.labeled.add_instance(x, y=yi, id=id, x_transformed=x_trans)
self.unlabeled.remove_instance_at(unlabeled_idxs)
def update_weights_with_feedback(self, xis, yis, x, y, x_transformed, ha, hn):
"""Relearns the optimal weights from feedback and updates internal labeled and unlabeled matrices
IMPORTANT:
This API assumes that the input x, y, x_transformed are consistent with
the internal labeled/unlabeled matrices, i.e., the top rows/values in
these matrices are from labeled data and bottom ones are from internally
stored unlabeled data.
Args:
xis: np.array(dtype=int)
indexes of instances in Union(self.labeled, self.unlabeled)
yis: np.array(dtype=int)
labels {0, 1} of instances (supposedly provided by an Oracle)
x: numpy.ndarray
set of all instances
y: list of int
set of all labels (only those at locations in the lists ha and hn are relevant)
x_transformed: numpy.ndarray
x transformed to ensemble features
ha: list of int
indexes of labeled anomalies
hn: list of int
indexes of labeled nominals
"""
# Add the newly labeled instance to the corresponding list of labeled
# instances and remove it from the unlabeled set.
nhn = len(ha) + len(hn)
self.move_unlabeled_to_labeled(xis - nhn, yis)
for xi, yi in zip(xis, yis):
if yi == 1:
ha = append(ha, [xi])
else:
hn = append(hn, [xi])
if not self.opts.do_not_update_weights:
self.model.update_weights(x_transformed, y, ha, hn, self.opts)
def run_feedback(self):
"""Runs active learning loop for current unlabeled window of data."""
min_feedback = self.opts.min_feedback_per_window
max_feedback = self.opts.max_feedback_per_window
# For the last window, we query till the buffer is exhausted
# irrespective of whether we exceed max_feedback per window limit
if self.stream_buffer_empty() and self.opts.till_budget:
bk = get_budget_topK(self.unlabeled.x.shape[0], self.opts)
n_labeled = 0 if self.labeled is None else len(self.labeled.y)
max_feedback = max(0, bk.budget - (n_labeled - self.n_prelabeled_instances))
max_feedback = min(max_feedback, self.unlabeled.x.shape[0])
if False:
# get baseline metrics
x_transformed = self.get_transformed(self.unlabeled.x)
ordered_idxs, _ = self.model.order_by_score(x_transformed)
seen_baseline = self.unlabeled.y[ordered_idxs[0:max_feedback]]
num_seen_baseline = np.cumsum(seen_baseline)
logger.debug("num_seen_baseline:\n%s" % str(list(num_seen_baseline)))
# baseline scores
w_baseline = self.model.get_uniform_weights()
order_baseline, scores_baseline = self.model.order_by_score(self.unlabeled.x_transformed, w_baseline)
n_seen_baseline = min(max_feedback, len(self.unlabeled.y))
queried_baseline = order_baseline[0:n_seen_baseline]
seen_baseline = self.unlabeled.y[queried_baseline]
orig_tau = self.opts.tau
self.opts.tau = self.reestimate_tau(orig_tau)
seen = np.zeros(0, dtype=int)
n_unlabeled = np.zeros(0, dtype=int)
queried = np.zeros(0, dtype=int)
unl = np.zeros(0, dtype=int)
i = 0
n_feedback = 0
while n_feedback < max_feedback:
i += 1
# scores based on current weights
xi_, x, y, ids, x_transformed, ha, hn, order_anom_idxs, anom_score = \
self.get_query_data(unl=unl, n_query=self.opts.n_explore)
order_anom_idxs_minus_ha_hn = get_first_vals_not_marked(
order_anom_idxs, append(ha, hn), n=len(order_anom_idxs))
bt = get_budget_topK(x_transformed.shape[0], self.opts)
# Note: We will ensure that the tau-th instance is at least 10-th (or lower) ranked
tau_rank = min(max(bt.topK, 10), x.shape[0])
xi = np.array(xi_, dtype=int)
if n_feedback + len(xi) > max_feedback:
xi = xi[0:(max_feedback - n_feedback)]
n_feedback += len(xi)
# logger.debug("n_feedback: %d, #xi: %d" % (n_feedback, len(xi)))
means = vars = qpos = m_tau = v_tau = None
if self.opts.query_confident:
# get the mean score and its variance for the top ranked instances
# excluding the instances which have already been queried
means, vars, test, v_eval, _ = get_score_variances(x_transformed, self.model.w,
n_test=tau_rank,
ordered_indexes=order_anom_idxs,
queried_indexes=append(ha, hn))
# get the mean score and its variance for the tau-th ranked instance
m_tau, v_tau, _, _, _ = get_score_variances(x_transformed[order_anom_idxs_minus_ha_hn[tau_rank]],
self.model.w, n_test=1,
test_indexes=np.array([0], dtype=int))
qpos = np.where(test == xi[0])[0] # top-most ranked instance
if False and self.opts.query_confident:
logger.debug("tau score:\n%s (%s)" % (str(list(m_tau)), str(list(v_tau))))
strmv = ",".join(["%f (%f)" % (means[j], vars[j]) for j in np.arange(len(means))])
logger.debug("scores:\n%s" % strmv)
# check if we are confident that this is larger than the tau-th ranked instance
if (not self.opts.query_confident) or (n_feedback <= min_feedback or
means[qpos] - 3. * np.sqrt(vars[qpos]) >= m_tau):
seen = np.append(seen, y[xi])
queried_ = [ids[q] for q in xi]
queried = np.append(queried, queried_)
tm_update = Timer()
self.update_weights_with_feedback(xi, y[xi], x, y, x_transformed, ha, hn)
tm_update.end()
# reset the list of queried test instances because their scores would have changed
unl = np.zeros(0, dtype=int)
if False:
nha, nhn, nul = self.get_instance_stats()
# logger.debug("xi:%d, test indxs: %s, qpos: %d" % (xi, str(list(test)), qpos))
# logger.debug("orig scores:\n%s" % str(list(anom_score[order_anom_idxs[0:tau_rank]])))
logger.debug("[%d] #feedback: %d; ha: %d; hn: %d, mnw: %d, mxw: %d; update: %f sec(s)" %
(i, nha + nhn, nha, nhn, min_feedback, max_feedback, tm_update.elapsed()))
else:
# ignore these instances from query
unl = np.append(unl, xi)
# logger.debug("skipping feedback for xi=%d at iter %d; unl: %s" % (xi, i, str(list(unl))))
# continue
n_unlabeled = np.append(n_unlabeled, [int(np.sum(self.unlabeled.y))])
# logger.debug("y:\n%s" % str(list(y)))
self.opts.tau = orig_tau
# logger.debug("w:\n%s" % str(list(sad.model.w)))
return seen, seen_baseline, queried, None, n_unlabeled
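# Note on the query-confidence rule used above (paraphrase of the condition in the loop):
# when opts.query_confident is set, an instance is queried only while the per-window
# minimum feedback has not yet been met, or when its mean score minus three standard
# deviations still exceeds the mean score of the tau-th ranked instance, i.e.
#   means[qpos] - 3*sqrt(vars[qpos]) >= m_tau
# otherwise it is skipped (appended to `unl`) and the query budget is preserved.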
def print_instance_stats(self, msg="debug"):
logger.debug("%s:\nlabeled: %s, unlabeled: %s" %
(msg,
'-' if self.labeled is None else str(self.labeled),
'-' if self.unlabeled is None else str(self.unlabeled)))
def train_aad_model(opts, x):
random_state = np.random.RandomState(opts.randseed + opts.fid * opts.reruns + opts.runidx)
# fit the model
model = get_aad_model(x, opts, random_state)
model.fit(x)
model.init_weights(init_type=opts.init)
return model
def prepare_aad_model(x, y, opts):
if opts.load_model and opts.modelfile != "" and os.path.isfile(opts.modelfile):
logger.debug("Loading model from file %s" % opts.modelfile)
model = load_aad_model(opts.modelfile)
else:
model = train_aad_model(opts, x)
if is_forest_detector(model.detector_type):
logger.debug("total #nodes: %d" % (len(model.all_regions)))
if False:
if model.w is not None:
logger.debug("w:\n%s" % str(list(model.w)))
else:
logger.debug("model weights are not set")
return model
def prepare_stream_anomaly_detector(stream, opts):
"""Prepares an instance of the StreamingAnomalyDetector
:param stream: DataStream
:param opts: AadOpts
Pre-training behavior is controlled through opts rather than explicit parameters:
if opts.pretrain is True, the first window of data is treated as fully *LABELED* and
the weights are updated with that labeled data; the next window is then fetched as
fully *UNLABELED* and the tree structure is updated if needed.
If opts.pretrain is False, the first window of data is treated as fully unlabeled.
opts.n_pretrain gives the number of weight updates to run when pre-training is required.
:return: StreamingAnomalyDetector
"""
training_set = stream.read_next_from_stream(opts.stream_window)
X_train, y_train, ids = training_set.x, training_set.y, training_set.ids
model = prepare_aad_model(X_train, y_train, opts) # initial model training
if opts.pretrain:
# first window pre-trains the model as fully labeled set
sad = StreamingAnomalyDetector(stream, model,
labeled_x=X_train, labeled_y=y_train, labeled_ids=ids,
max_buffer=opts.stream_window, opts=opts)
# second window is treated as fully unlabeled
instances = sad.get_next_from_stream(sad.max_buffer,
transform=(not opts.allow_stream_update))
if instances is not None:
model_updated = False
if opts.allow_stream_update:
model_updated = sad.update_model_from_buffer(transform=True)
sad.move_buffer_to_unlabeled()
if model_updated:
sad.update_weights_with_no_feedback()
sad.feature_ranges = get_sample_feature_ranges(instances.x)
else:
sad.feature_ranges = get_sample_feature_ranges(X_train)
sad.init_query_state()
else:
# first window is treated as fully unlabeled
sad = StreamingAnomalyDetector(stream, model,
unlabeled_x=X_train, unlabeled_y=y_train, unlabeled_ids=ids,
max_buffer=opts.stream_window, opts=opts)
sad.feature_ranges = get_sample_feature_ranges(X_train)
sad.init_query_state()
return sad
def aad_stream():
logger = logging.getLogger(__name__)
# PRODUCTION
args = get_aad_command_args(debug=False)
# print "log file: %s" % args.log_file
configure_logger(args)
opts = AadOpts(args)
# print opts.str_opts()
logger.debug(opts.str_opts())
if not opts.streaming:
raise ValueError("Only streaming supported")
np.random.seed(opts.randseed)
X_full, y_full = read_data_as_matrix(opts)
logger.debug("loaded file: (%s) %s" % (str(X_full.shape), opts.datafile))
logger.debug("results dir: %s" % opts.resultsdir)
all_num_seen = None
all_num_not_seen = None
all_num_seen_baseline = None
all_queried = None
all_window = None
all_window_baseline = None
opts.fid = 1
for runidx in opts.get_runidxs():
tm_run = Timer()
opts.set_multi_run_options(opts.fid, runidx)
stream = DataStream(X_full, y_full, IdServer(initial=0))
# from aad.malware_aad import MalwareDataStream
# stream = MalwareDataStream(X_full, y_full, IdServer(initial=0))
sad = prepare_stream_anomaly_detector(stream, opts)
if sad.unlabeled is None:
logger.debug("No instances to label")
continue
iter = 0
seen = np.zeros(0, dtype=int)
n_unlabeled = np.zeros(0, dtype=int)
seen_baseline = np.zeros(0, dtype=int)
queried = np.zeros(0, dtype=int)
stream_window_tmp = np.zeros(0, dtype=int)
stream_window_baseline = np.zeros(0, dtype=int)
stop_iter = False
while not stop_iter:
iter += 1
tm = Timer()
seen_, seen_baseline_, queried_, queried_baseline_, n_unlabeled_ = sad.run_feedback()
# gather metrics...
seen = append(seen, seen_)
n_unlabeled = append(n_unlabeled, n_unlabeled_)
seen_baseline = append(seen_baseline, seen_baseline_)
queried = append(queried, queried_)
stream_window_tmp = append(stream_window_tmp, np.ones(len(seen_)) * iter)
stream_window_baseline = append(stream_window_baseline, np.ones(len(seen_baseline_)) * iter)
# get the next window of data from stream and transform features...
# Note: Since model update will automatically transform the data, we will
# not transform while reading from stream. If however, the model is not
# to be updated, then we transform the data while reading from stream
instances = sad.get_next_from_stream(sad.max_buffer,
transform=(not opts.allow_stream_update))
if instances is None or iter >= opts.max_windows or len(queried) >= opts.budget:
if iter >= opts.max_windows:
logger.debug("Exceeded %d iters; exiting stream read..." % opts.max_windows)
stop_iter = True
else:
model_updated = False
if opts.allow_stream_update:
model_updated = sad.update_model_from_buffer(transform=True)
sad.move_buffer_to_unlabeled()
if model_updated:
sad.update_weights_with_no_feedback()
logger.debug(tm.message("Stream window [%d]: algo [%d/%d]; baseline [%d/%d]; unlabeled anoms [%d]: " %
(iter, int(np.sum(seen)), len(seen),
int(np.sum(seen_baseline)), len(seen_baseline),
int(np.sum(sad.unlabeled.y)))))
# retained = int(np.sum(sad.unlabeled_y)) if sad.unlabeled_y is not None else 0
# logger.debug("Final retained unlabeled anoms: %d" % retained)
num_seen_tmp = np.cumsum(seen)
# logger.debug("\nnum_seen : %s" % (str(list(num_seen_tmp)),))
num_seen_baseline = np.cumsum(seen_baseline)
# logger.debug("Numseen in %d budget (overall):\n%s" % (opts.budget, str(list(num_seen_baseline))))
stream_window_baseline = append(np.array([opts.fid, opts.runidx],
dtype=stream_window_baseline.dtype),
stream_window_baseline)
stream_window = np.ones(len(stream_window_baseline) + 2, dtype=stream_window_tmp.dtype) * -1
stream_window[0:2] = [opts.fid, opts.runidx]
stream_window[2:(2+len(stream_window_tmp))] = stream_window_tmp
# num_seen_baseline has the uniformly maximum number of queries.
# the number of queries in num_seen will vary under the query confidence mode
num_seen = np.ones(len(num_seen_baseline) + 2, dtype=num_seen_tmp.dtype) * -1
num_not_seen = np.ones(len(num_seen_baseline) + 2, dtype=num_seen.dtype) * -1
num_seen[0:2] = [opts.fid, opts.runidx]
num_seen[2:(2+len(num_seen_tmp))] = num_seen_tmp
queried_ids = np.ones(len(num_seen_baseline) + 2, dtype=num_seen_tmp.dtype) * -1
queried_ids[0:2] = [opts.fid, opts.runidx]
# IMPORTANT:: The queried indexes are output as 1-indexed (NOT zero-indexed)
# logger.debug("queried:\n%s\n%s" % (str(list(queried)), str(list(y_full[queried]))))
queried_ids[2:(2 + len(queried))] = queried + 1
# the number of unlabeled instances in buffer. For streaming this is
# important since this represents the potential to discover true
# anomalies. True anomalies in unlabeled set should not get discarded
# when a new window of data arrives.
num_not_seen[0:2] = [opts.fid, opts.runidx]
num_not_seen[2:(2+len(n_unlabeled))] = n_unlabeled
num_seen_baseline = append(np.array([opts.fid, opts.runidx], dtype=num_seen_baseline.dtype), num_seen_baseline)
all_num_seen = rbind(all_num_seen, matrix(num_seen, nrow=1))
all_num_not_seen = rbind(all_num_not_seen, matrix(num_not_seen, nrow=1))
all_num_seen_baseline = rbind(all_num_seen_baseline, matrix(num_seen_baseline, nrow=1))
all_queried = rbind(all_queried, matrix(queried_ids, nrow=1))
all_window = rbind(all_window, matrix(stream_window, nrow=1))
all_window_baseline = rbind(all_window_baseline, matrix(stream_window_baseline, nrow=1))
logger.debug(tm_run.message("Completed runidx: %d" % runidx))
results = SequentialResults(num_seen=all_num_seen,
num_not_seen=all_num_not_seen,
true_queried_indexes=all_queried,
num_seen_baseline=all_num_seen_baseline,
# true_queried_indexes_baseline=all_queried_baseline,
stream_window=all_window,
stream_window_baseline=all_window_baseline,
aucs=None)
write_sequential_results_to_csv(results, opts)
if __name__ == "__main__":
aad_stream()
|
tests/epyccel/test_epyccel_IfTernaryOperator.py | dina-fouad/pyccel | 206 | 11138489 | <reponame>dina-fouad/pyccel
# pylint: disable=missing-function-docstring, missing-module-docstring/
import pytest
from pyccel.epyccel import epyccel
from pyccel.decorators import types
# wp suffix means With Parentheses
#------------------------------------------------------------------------------
def test_f1(language):
@types('int')
def f1(x):
a = 5 if x < 5 else x
return a
f = epyccel(f1, language = language)
# ...
assert f(6) == f1(6)
assert f(4) == f1(4)
# ...
#------------------------------------------------------------------------------
def test_f2(language):
@types('int')
def f2(x):
a = 5.5 if x < 5 else x
return a
f = epyccel(f2, language = language)
# ...
assert f(6) == f2(6)
assert f(4) == f2(4)
# ...
#------------------------------------------------------------------------------
def test_f3(language):
@types('int')
def f3(x):
a = x if x < 5 else 5 + 2
return a
@types('int')
def f3wp(x):
a = (x if x < 5 else 5) + 2
return a
f = epyccel(f3, language = language)
fwp = epyccel(f3wp, language = language)
# ...
assert f(6) == f3(6)
assert f(4) == f3(4)
assert fwp(6) == f3wp(6)
assert fwp(4) == f3wp(4)
# ...
#------------------------------------------------------------------------------
def test_f4(language):
@types('int')
def f4(x):
a = x if x < 5 else 5 >> 2
return a
@types('int')
def f4wp(x):
a = (x if x < 5 else 5) >> 2
return a
f = epyccel(f4, language = language)
fwp = epyccel(f4wp, language = language)
# ...
assert f(6) == f4(6)
assert f(4) == f4(4)
assert fwp(6) == f4wp(6)
assert fwp(4) == f4wp(4)
# ...
#------------------------------------------------------------------------------
def test_f5(language):
@types('int')
def f5(x):
a = x if x < 5 else 5 if x == 5 else 5.5
return a
@types('int')
def f5wp(x):
a = x if x < 5 else (5 if x == 5 else 5.5)
return a
f = epyccel(f5, language = language)
fwp = epyccel(f5wp, language = language)
# ...
assert f(6) == f5(6)
assert f(4) == f5(4)
assert f(5) == f5(5)
assert fwp(6) == f5wp(6)
assert fwp(4) == f5wp(4)
assert fwp(5) == f5wp(5)
# ...
#------------------------------------------------------------------------------
def test_f6(language):
@types('int')
def f6(x):
# a = x if x < 0 else (1 if x < 5 else (complex(0, 1) if x == 5 else 6.5))
a = x if x < 0 else 1 if x < 5 else complex(0, 1) if x == 5 else 6.5
return a
@types('int')
def f6wp(x):
a = x if x < 0 else (1 if x < 5 else (complex(0, 1) if x == 5 else 6.5))
return a
f = epyccel(f6, language = language)
fwp = epyccel(f6wp, language = language)
# ...
assert f(6) == f6(6)
assert f(4) == f6(4)
assert f(5) == f6(5)
assert fwp(6) == f6wp(6)
assert fwp(4) == f6wp(4)
assert fwp(5) == f6wp(5)
# ...
#------------------------------------------------------------------------------
def test_f7(language):
@types('int')
def f7(x):
a = [1.,2.,3.] if x < 5 else [1.5,6.5,7.5]
return a[0]
@types('int')
def f7wp(x):
a = [1.,2.,3.] if x < 5 else ([1.5,6.5,7.5] if x > 5 else [3.1,9.5,2.8])
return a[0]
f = epyccel(f7, language = language)
fwp = epyccel(f7wp, language = language)
# ...
assert f(6) == f7(6)
assert f(4) == f7(4)
assert fwp(6) == f7wp(6)
assert fwp(4) == f7wp(4)
# ...
#------------------------------------------------------------------------------
def test_f8(language):
@types('int')
def f8(x):
a = (1+0j, 2+0j) if x < 5 else (complex(5, 1), complex(2, 2))
return a[0]
@types('int')
def f8wp(x):
a = (1+0j, 2+0j) if x < 5 else ((complex(5, 1), complex(2, 2)) if x > 5 else (complex(7, 2), complex(3, 3)) )
return a[0]
f = epyccel(f8, language = language)
fwp = epyccel(f8wp, language = language)
# ...
assert f(6) == f8(6)
assert f(4) == f8(4)
assert fwp(6) == f8wp(6)
assert fwp(4) == f8wp(4)
# ...
#------------------------------------------------------------------------------
def test_f9(language):
@types('int')
def f9(x):
a = 1 + 2 if x < 5 else 3
return a
@types('int')
def f9wp1(x):
a = 1 + (2 if x < 5 else 3)
return a
@types('int')
def f9wp2(x):
a = (1 + 2) if x < 5 else 3
return a
f = epyccel(f9, language = language)
fwp1 = epyccel(f9wp1, language = language)
fwp2 = epyccel(f9wp2, language = language)
# ...
assert f(6) == f9(6)
assert f(4) == f9(4)
assert fwp1(6) == f9wp1(6)
assert fwp1(4) == f9wp1(4)
assert fwp2(6) == f9wp2(6)
assert fwp2(4) == f9wp2(4)
# ...
#------------------------------------------------------------------------------
def test_f10(language):
@types('int')
def f10(x):
a = 2 if x < 5 else 3 + 1
return a
@types('int')
def f10wp1(x):
a = (2 if x < 5 else 3) + 1
return a
@types('int')
def f10wp2(x):
a = 2 if x < 5 else (3 + 1)
return a
f = epyccel(f10, language = language)
fwp1 = epyccel(f10wp1, language = language)
fwp2 = epyccel(f10wp2, language = language)
# ...
assert f(6) == f10(6)
assert f(4) == f10(4)
assert fwp1(6) == f10wp1(6)
assert fwp1(4) == f10wp1(4)
assert fwp2(6) == f10wp2(6)
assert fwp2(4) == f10wp2(4)
# ...
#------------------------------------------------------------------------------
def test_f11(language):
@types('int')
def f11(x):
a = 2 if (x + 2)*5 < 5 else 3
return a
f = epyccel(f11, language = language)
# ...
assert f(6) == f11(6)
assert f(-4) == f11(-4)
# ...
#------------------------------------------------------------------------------
def test_f12(language):
@types('int')
def f12(x):
a = [1.,2.,3.,4.] if x < 5 else [1.5,6.5,7.5]
return a[0]
@types('int')
def f12wp(x):
a = [1.,2.,3.] if x < 5 else ([1.5,6.5,7.5] if x > 5 else [3.1,9.5,2.8,2.9])
return a[0]
f = epyccel(f12, language = language)
fwp = epyccel(f12wp, language = language)
# ...
assert f(6) == f12(6)
assert f(4) == f12(4)
assert fwp(6) == f12wp(6)
assert fwp(4) == f12wp(4)
# ...
#------------------------------------------------------------------------------
@pytest.mark.parametrize( 'language', (
pytest.param("fortran", marks = [
pytest.mark.skip(reason="Can't return a string"),
pytest.mark.fortran]
),
pytest.param("c", marks = [
pytest.mark.skip(reason="Can't declare a string"),
pytest.mark.c]
),
pytest.param("python", marks = pytest.mark.python)
)
)
def test_f13(language):
def f13(b : bool):
a = 'hello' if b else 'world!'
return a
def f13wp(b1 : bool, b2 : bool):
a = 'hello' if b1 else ('world' if b2 else 'hello world')
return a
f = epyccel(f13, language = language)
fwp = epyccel(f13wp, language = language)
# ...
assert f(True) == f13(True)
assert f(False) == f13(False)
assert fwp(True,True) == f13wp(True,True)
assert fwp(True,False) == f13wp(True,False)
assert fwp(False,True) == f13wp(False,True)
assert fwp(False,False) == f13wp(False,False)
# ...
#------------------------------------------------------------------------------
|
numpy/_globals.py | leonarduschen/numpy | 603 | 11138517 | """
Module defining global singleton classes.
This module raises a RuntimeError if an attempt to reload it is made. In that
way the identities of the classes defined here are fixed and will remain so
even if numpy itself is reloaded. In particular, a function like the following
will still work correctly after numpy is reloaded::
def foo(arg=np._NoValue):
if arg is np._NoValue:
...
That was not the case when the singleton classes were defined in the numpy
``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
motivated this module.
"""
__all__ = [
'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue'
]
# Disallow reloading this module so as to preserve the identities of the
# classes defined here.
if '_is_loaded' in globals():
raise RuntimeError('Reloading numpy._globals is not allowed')
_is_loaded = True
class ModuleDeprecationWarning(DeprecationWarning):
"""Module deprecation warning.
The nose tester turns ordinary Deprecation warnings into test failures.
That makes it hard to deprecate whole modules, because they get
imported by default. So this is a special Deprecation warning that the
nose tester will let pass without making tests fail.
"""
ModuleDeprecationWarning.__module__ = 'numpy'
class VisibleDeprecationWarning(UserWarning):
"""Visible deprecation warning.
By default, python will not show deprecation warnings, so this class
can be used when a very visible warning is helpful, for example because
the usage is most likely a user bug.
"""
VisibleDeprecationWarning.__module__ = 'numpy'
class _NoValueType:
"""Special keyword value.
The instance of this class may be used as the default value assigned to a
keyword if no other obvious default (e.g., `None`) is suitable,
Common reasons for using this keyword are:
- A new keyword is added to a function, and that function forwards its
inputs to another function or method which can be defined outside of
NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims``
keyword was added that could only be forwarded if the user explicitly
specified ``keepdims``; downstream array libraries may not have added
the same keyword, so adding ``x.std(..., keepdims=keepdims)``
unconditionally could have broken previously working code.
- A keyword is being deprecated, and a deprecation warning must only be
emitted when the keyword is used.
"""
__instance = None
def __new__(cls):
# ensure that only one instance exists
if not cls.__instance:
cls.__instance = super(_NoValueType, cls).__new__(cls)
return cls.__instance
# needed for python 2 to preserve identity through a pickle
def __reduce__(self):
return (self.__class__, ())
def __repr__(self):
return "<no value>"
_NoValue = _NoValueType()
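# Illustrative usage sketch (hypothetical wrapper, not part of this module): defaulting
# a keyword to _NoValue lets a function forward the keyword only when the caller
# actually supplied it:
#
#   def std(a, keepdims=_NoValue):
#       kwargs = {} if keepdims is _NoValue else {'keepdims': keepdims}
#       return a.std(**kwargs)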
|
sympy/physics/mechanics/tests/test_method.py | utkarshdeorah/sympy | 8,323 | 11138521 | <filename>sympy/physics/mechanics/tests/test_method.py
from sympy.physics.mechanics.method import _Methods
from sympy.testing.pytest import raises
def test_method():
raises(TypeError, lambda: _Methods())
|
components/test/data/autofill/automated_integration/autofill_test/suite.py | zealoussnow/chromium | 14,668 | 11138558 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome Autofill Test Flow
Execute a set of autofill tasks in a fresh ChromeDriver instance that has been
pre-loaded with some default profile.
Requires:
- Selenium python bindings
http://selenium-python.readthedocs.org/
- ChromeDriver
https://sites.google.com/a/chromium.org/chromedriver/downloads
The ChromeDriver executable must be available on the search PATH.
- Chrome
"""
import importlib
import unittest
# Local Imports
from autofill_task.autofill_task import AutofillTask
from testdata import profile_data
from .case import AutofillTestCase
class AutofillTestSuite(unittest.TestSuite):
"""Represents an aggregation of individual Autofill test cases.
Attributes:
user_data_dir: Path string for the writable directory in which profiles
should be stored.
chrome_binary: Path string to the Chrome binary that should be used by
ChromeDriver.
If None then it will use the PATH to find a binary.
test_class: Name of the test class that should be run.
If this is set, then only the specified class will be executed
module: The module to load test cases from. This is relative to the tasks
package.
profile: Dict of profile data that acts as the master source for
validating autofill behaviour. If not specified then default profile data
will be used from testdata.profile_data.
debug: Whether debug output should be printed (False if not specified).
"""
def __init__(self, user_data_dir, chrome_binary=None, test_class=None,
module='sites', profile=None, debug=False):
if profile is None:
profile = profile_data.DEFAULT
super(AutofillTestSuite, self).__init__()
self._test_class = test_class
self._profile = profile
self._debug = debug
module = 'tasks.%s' % module
try:
importlib.import_module(module)
except ImportError:
print 'Unable to load %s from tasks.' % module
raise
self._generate_tests(user_data_dir, chrome_binary)
def _generate_tests(self, user_data_dir, chrome_binary=None):
task_classes = AutofillTask.__subclasses__()
tests = []
if self._test_class:
for task in task_classes:
if task.__name__ == self._test_class:
test = AutofillTestCase(task, user_data_dir, self._profile,
chrome_binary=chrome_binary,
debug=self._debug)
self.addTest(test)
return
raise ValueError('Autofill Test \'%s\' could not be found.' %
self._test_class)
else:
for task in task_classes:
tests.append(AutofillTestCase(task, user_data_dir, self._profile,
chrome_binary=chrome_binary,
debug=self._debug))
self.addTests(tests)
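# Illustrative usage sketch (assumed paths; not part of the original suite): the suite
# plugs into the standard unittest runner, e.g.
#
#   suite = AutofillTestSuite('/tmp/autofill_profiles',
#                             chrome_binary='/usr/bin/google-chrome',
#                             module='sites', debug=True)
#   unittest.TextTestRunner(verbosity=2).run(suite)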
|
tests/guinea-pigs/nose/doctests/namespace1/d.py | djeebus/teamcity-python | 105 | 11138566 | <reponame>djeebus/teamcity-python<gh_stars>100-1000
def multiply(a, b):
"""
'multiply' multiplies two numbers and returns the result.
>>> multiply(5, 10)
50
>>> multiply(-1, 1)
-1
>>> multiply(0.5, 1.5)
0.75
"""
return a*b
|
python/oneflow/compatible/single_client/test/ops/test_unsorted_batch_segment_sum.py | wangyuyue/oneflow | 3,285 | 11138572 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import test_global_storage
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def _check(test_case, data, segment_ids, out_shape, out):
test_case.assertEqual(out.shape, out_shape)
ref = np.zeros_like(out)
for (idx, i) in np.ndenumerate(segment_ids):
out_idx = list(idx)
out_idx[-1] = i
out_idx = tuple(out_idx)
ref[out_idx] += data[idx]
test_case.assertTrue(np.allclose(ref, out, atol=1e-05, rtol=1e-05))
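# Worked example of the reference computation above (illustrative): with
# data=[1., 2., 3.], segment_ids=[0, 0, 1] and out_shape=(2,), each value is
# scatter-added into the slot selected by its segment id along the last axis,
# giving ref = [1.+2., 3.] = [3., 3.].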
def _check_bw(test_case, params, indices, out_shape, out):
ref = np.zeros_like(out)
for (idx, i) in np.ndenumerate(indices):
in_idx = list(idx)
in_idx[-1] = i
in_idx = tuple(in_idx)
ref[idx] += params[in_idx]
test_case.assertTrue(np.array_equal(ref, out))
def _gen_segment_ids(out_shape, num_segments, segment_ids_shape):
axis = len(segment_ids_shape) - 1
return np.random.randint(
low=0, high=out_shape[axis], size=segment_ids_shape, dtype=np.int32
)
def _gen_data(out_shape, num_segments, segment_ids_shape):
axis = len(segment_ids_shape) - 1
data_shape = out_shape[0:axis] + (segment_ids_shape[axis],) + out_shape[axis + 1 :]
return np.random.rand(*data_shape).astype(np.float32)
def _make_unsoted_segment_sum_fn(device, data, segment_ids, num_segments):
flow.clear_default_session()
@flow.global_function(type="train", function_config=func_config)
def unsorted_batch_segment_sum_job(
data: oft.Numpy.Placeholder(data.shape, dtype=flow.float),
segment_ids: oft.Numpy.Placeholder(segment_ids.shape, dtype=flow.int32),
):
with flow.scope.placement(device, "0:0"):
x = flow.get_variable(
"data",
shape=data.shape,
dtype=flow.float32,
initializer=flow.constant_initializer(0),
)
data = x + data
res = flow.math.unsorted_batch_segment_sum(
data=data, segment_ids=segment_ids, num_segments=num_segments
)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(res)
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch_diff(res, test_global_storage.Setter("loss_diff"))
return res
return unsorted_batch_segment_sum_job(data, segment_ids)
def _run_test(test_case, device, out_shape, num_segments, segment_ids_shape):
segment_ids = _gen_segment_ids(out_shape, num_segments, segment_ids_shape)
data = _gen_data(out_shape, num_segments, segment_ids_shape)
unsorted_batch_segment_sum_out = _make_unsoted_segment_sum_fn(
device, data, segment_ids, num_segments
).get()
out_ndarray = unsorted_batch_segment_sum_out.numpy()
grad_in_ndarray = test_global_storage.Get("x_diff")
grad_out_ndarray = test_global_storage.Get("loss_diff")
_check(test_case, data, segment_ids, out_shape, out_ndarray)
_check_bw(
test_case, grad_out_ndarray, segment_ids, grad_in_ndarray.shape, grad_in_ndarray
)
@flow.unittest.skip_unless_1n1d()
class TestUnsortedBatchSegmentSum(flow.unittest.TestCase):
def test_unsorted_batch_segment_sum(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["out_shape"] = [(2, 4, 7, 6)]
arg_dict["num_segments"] = [7]
arg_dict["segment_ids_shape"] = [(2, 4, 5)]
for arg in GenArgList(arg_dict):
_run_test(test_case, *arg)
if __name__ == "__main__":
unittest.main()
|
opennmt/tests/api_test.py | gcervantes8/OpenNMT-tf | 1,363 | 11138574 | <gh_stars>1000+
import inspect
import tensorflow as tf
import opennmt
class APITest(tf.test.TestCase):
def testSubmodules(self):
def _check(module):
self.assertTrue(inspect.ismodule(module))
_check(opennmt.data)
_check(opennmt.decoders)
_check(opennmt.encoders)
_check(opennmt.inputters)
_check(opennmt.layers)
_check(opennmt.models)
_check(opennmt.optimizers)
_check(opennmt.schedules)
_check(opennmt.tokenizers)
_check(opennmt.utils)
if __name__ == "__main__":
tf.test.main()
|
src/amuse/io/gadget.py | rknop/amuse | 131 | 11138595 | <gh_stars>100-1000
import struct
import numpy
from collections import namedtuple
from amuse.io import base
from amuse.units import units
from amuse.units import nbody_system
from amuse.support.core import late
from amuse import datamodel
class GadgetFileFormatProcessor(base.FortranFileFormatProcessor):
"""
Process a Gadget binary data file
"""
provided_formats = ['gadget']
GAS = 0
HALO = 1
DISK = 2
BULGE = 3
STARS = 4
BNDRY = 5
@late
def header_format(self):
collate = []
collate.append(self.endianness)
for name, times, formatchar in self.header_struct_format:
for t in range(times):
collate.append(formatchar)
return ''.join(collate)
@base.format_option
def header_struct_format(self):
"""The format of the header structure of the gadget file."""
return (
('Npart', 6, 'I'),
('Massarr', 6, 'd'),
('Time', 1, 'd'),
('Redshift', 1, 'd'),
('FlagSfr', 1, 'i'),
('FlagFeedback', 1, 'i'),
('Nall', 6, 'i'),
('FlagCooling', 1, 'i'),
('NumFiles', 1, 'i'),
('BoxSize', 1, 'd'),
('Omega0', 1, 'd'),
('OmegaLambda', 1, 'd'),
('HubbleParam', 1, 'd'),
('FlagAge', 1, 'd'),
('FlagMetals', 1, 'd'),
('NallHW', 6, 'i'),
('flag_entr_ics', 1, 'i'),
)
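# Size note (illustrative arithmetic, assuming an explicit endianness prefix such as
# '<' or '>' so struct adds no alignment padding): the fields above pack into
# 6*4 + 6*8 + 8 + 8 + 4 + 4 + 6*4 + 4 + 4 + 8 + 8 + 8 + 8 + 8 + 8 + 6*4 + 4 = 204 bytes,
# which store_header() below writes into a 256-byte buffer, matching the fixed
# 256-byte Gadget header record.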
@base.format_option
def has_potential_energy(self):
"""Set to true if the file has a potential energy block"""
return False
@base.format_option
def has_acceleration(self):
"""Set to true if the file has a acceleration block"""
return False
@base.format_option
def has_rate_of_entropy_production(self):
"""Set to true if the file has a block with the
rate of change of the entropic function of each gas particle
"""
return False
@base.format_option
def has_timestep(self):
"""Set to true if the file has a block with the
individual timestep for each particle.
"""
return False
@base.format_option
def is_initial_conditions_format(self):
"""Set to true if the file contains
initial conditions. An initial conditions
gadget file contains less data.
"""
return True
@base.format_option
def ids_are_keys(self):
"""Set to True if the file contains
correct keys. Set to False to generate
the keys in amuse and provide an id attribute for
the id's in the gadget file"""
return True
@base.format_option
def equal_mass_array(self):
"""If filled with an array with masses > 0.0
assume equal mass for the corresponding set"""
return ([0.0] * 6) | nbody_system.mass
@base.format_option
def ids_are_long(self):
"""Set to true the ids will be written
as longs in the gadget file"""
return True
@base.format_option
def return_header(self):
"""Set to True to return both the particles and the header from the gadget file"""
return False
@base.format_option
def write_header_from(self):
"""Pass a namedtuple to store non-default values in the header of the gadget file"""
return None
@base.format_option
def convert_gadget_w_to_velocity(self):
"""Set to True to convert the w=sqrt(a)*dx/dt to (comoving) velocity in comoving integrations"""
return False
@late
def header_size(self):
return struct.calcsize(self.header_format)
@late
def struct_class_name(self):
return 'GadgetHeader'
@late
def struct_class(self):
collate = []
for name, times, formatchar in self.header_struct_format:
collate.append(name)
attribute_names = ' '.join(collate)
return namedtuple(self.struct_class_name, attribute_names)
@late
def total_number_of_particles(self):
return sum(self.header_struct.Npart)
@late
def total_number_of_particles_with_variable_masses(self):
result = 0
for x, n in zip(self.header_struct.Massarr, self.header_struct.Npart):
if x == 0.0:
result += n
return result
@late
def number_of_gas_particles(self):
return self.header_struct.Npart[self.GAS]
def collect_values(self, values):
offset = 0
result = []
for name, times, formatchar in self.header_struct_format:
if times > 1:
result.append(values[offset: offset+times])
else:
result.append(values[offset])
offset += times
return result
def load_header(self, file):
header_bytes = self.read_fortran_block(file)
values = struct.unpack(self.header_format, header_bytes[0:self.header_size])
values = self.collect_values(values)
self.header_struct = self.struct_class(*values)
def load_body(self, file):
self.positions = self.read_fortran_block_float_vectors(file)
self.velocities = self.read_fortran_block_float_vectors(file)
id_bytes = self.read_fortran_block(file)
if len(id_bytes) == 4*self.total_number_of_particles:
self.ids = numpy.frombuffer(id_bytes, dtype=self.uint_type)
else:
self.ids = numpy.frombuffer(id_bytes, dtype=self.ulong_type)
if self.total_number_of_particles_with_variable_masses > 0:
self.masses = self.read_fortran_block_floats(file)
else:
self.masses = None
if self.number_of_gas_particles > 0:
self.u = self.read_fortran_block_floats(file)[:self.number_of_gas_particles]
#print self.u, self.number_of_gas_particles, len(self.u)
else:
self.u = None
if self.is_initial_conditions_format:
self.density = None
self.hsml = None
self.pot = None
self.acc = None
self.da_dt = None
self.dt = None
return
if self.number_of_gas_particles > 0:
self.density = self.read_fortran_block_floats(file)
self.hsml = self.read_fortran_block_floats(file)
else:
self.density = None
self.hsml = None
if self.has_potential_energy:
self.pot = self.read_fortran_block_floats(file)
else:
self.pot = None
if self.has_acceleration:
self.acc = self.read_fortran_block_floats(file)
else:
self.acc = None
if self.has_rate_of_entropy_production:
self.da_dt = self.read_fortran_block_floats(file)
else:
self.da_dt = None
if self.has_timestep:
self.dt = self.read_fortran_block_floats(file)
else:
self.dt = None
def new_sets_from_arrays(self):
offset = 0
ids_per_set = []
for x in self.header_struct.Npart:
ids_per_set.append(self.ids[offset:offset+x])
offset += x
if self.ids_are_keys:
sets = [datamodel.Particles(len(x), keys=x) for x in ids_per_set]
else:
sets = [datamodel.Particles(len(x)) for x in ids_per_set]
for set, x in zip(sets, ids_per_set):
if len(set) > 0:
set.id = x
offset = 0
for x in sets:
length = len(x)
if length == 0:
continue
x.position = nbody_system.length.new_quantity(self.positions[offset:offset+length])
x.velocity = nbody_system.speed.new_quantity(self.velocities[offset:offset+length])
if self.convert_gadget_w_to_velocity:
x.velocity *= numpy.sqrt(1.0 + self.header_struct.Redshift)
if not self.pot is None:
x.potential_energy = nbody_system.energy.new_quantity(self.pot[offset:offset+length])
if not self.acc is None:
x.acceleration = nbody_system.acceleration.new_quantity(self.acc[offset:offset+length])
if not self.dt is None:
x.timestep = nbody_system.time.new_quantity(self.dt[offset:offset+length])
offset += length
offset = 0
for x, mass in zip(sets, self.header_struct.Massarr):
length = len(x)
if length == 0:
continue
if mass == 0.0:
x.mass = nbody_system.mass.new_quantity(self.masses[offset:offset+length])
offset += length
else:
x.mass = nbody_system.mass.new_quantity(mass)
if self.number_of_gas_particles > 0:
gas_set = sets[self.GAS]
unit = (nbody_system.length / nbody_system.time) ** 2
gas_set.u = unit.new_quantity(self.u)
unit = nbody_system.mass / nbody_system.length ** 3
if not self.density is None:
gas_set.rho = unit.new_quantity(self.density)
if not self.hsml is None:
gas_set.h_smooth = nbody_system.length.new_quantity(self.hsml)
return sets
def load_file(self, file):
self.load_header(file)
self.load_body(file)
attribute_names = ["gas","halo","disk","bulge","stars","bndry"]
values = self.new_sets_from_arrays()
if self.return_header:
attribute_names += [name for name, times, formatchar in self.header_struct_format]
values += list(self.header_struct)
return namedtuple("GadgetData", attribute_names)(*values)
@late
def sets_to_save(self):
if isinstance(self.set, (tuple, list)):
sets_to_save = self.set
elif hasattr(self.set, 'key'):
sets_to_save = (self.set, )
else:
raise Exception("The Gadget binary file writer can write a particle set or a list of sets but nothing else")
if len(sets_to_save) < 6:
sets_to_save = list(sets_to_save)
sets_to_save.extend([()] * (6 - len(sets_to_save)))
return sets_to_save
def store_file(self, file):
self.store_header(file)
self.store_body(file)
def header_field_value(self, name):
if hasattr(self.write_header_from, name):
return getattr(self.write_header_from, name)
else:
return self.default_header_field_value(name)
def default_header_field_value(self, name):
return self.default_header[name]
def store_header(self, file):
self.default_header = dict(
Massarr = list(self.equal_mass_array.value_in(nbody_system.mass)),
Time = 0.0,
Redshift = 0.0,
FlagSfr = 0,
FlagFeedback = 0,
Nall = [len(x) for x in self.sets_to_save],
FlagCooling = 0,
NumFiles = 1,
BoxSize = 0,
Omega0 = 0,
OmegaLambda = 0,
HubbleParam = 0,
FlagAge = 0.0,
FlagMetals = 0.0,
NallHW = [0]*6,
flag_entr_ics = 0
)
if self.write_header_from is None:
self.header_field_value = self.default_header_field_value
self.header_struct = self.struct_class(
Npart = [len(x) for x in self.sets_to_save],
Massarr = list(self.header_field_value("Massarr")),
Time = self.header_field_value("Time"),
Redshift = self.header_field_value("Redshift"),
FlagSfr = self.header_field_value("FlagSfr"),
FlagFeedback = self.header_field_value("FlagFeedback"),
Nall = list(self.header_field_value("Nall")),
FlagCooling = self.header_field_value("FlagCooling"),
NumFiles = self.header_field_value("NumFiles"),
BoxSize = self.header_field_value("BoxSize"),
Omega0 = self.header_field_value("Omega0"),
OmegaLambda = self.header_field_value("OmegaLambda"),
HubbleParam = self.header_field_value("HubbleParam"),
FlagAge = self.header_field_value("FlagAge"),
FlagMetals = self.header_field_value("FlagMetals"),
NallHW = list(self.header_field_value("NallHW")),
flag_entr_ics = self.header_field_value("flag_entr_ics")
)
bytes = bytearray(256)
parts = list()
for x in list(self.header_struct):
if isinstance(x, list):
parts.extend(x)
else:
parts.append(x)
struct.pack_into(self.header_format, bytes, 0, *parts)
self.write_fortran_block(file, bytes)
def _pack_sets(self, attributename, unit = None):
result = []
for x in self.sets_to_save:
if len(x) > 0:
if unit is None:
result.extend(getattr(x,attributename))
else:
result.extend(getattr(x,attributename).value_in(unit))
return result
def store_body(self, file):
self.positions = self._pack_sets('position', nbody_system.length)
self.velocities = self._pack_sets('velocity', nbody_system.speed)
if self.ids_are_keys:
self.ids = self._pack_sets('key')
else:
self.ids = self._pack_sets('id')
self.masses = []
for equal_mass, x in zip(self.equal_mass_array, self.sets_to_save):
if len(x) > 0 and not equal_mass > (0.0 | nbody_system.mass):
self.masses.extend(x.mass.value_in(nbody_system.mass))
if len(self.masses) == 0:
self.masses = None
number_of_gas_particles = len(self.sets_to_save[self.GAS])
if number_of_gas_particles > 0:
self.u = self.sets_to_save[0].u.value_in(nbody_system.potential)
else:
self.u = None
self.write_fortran_block_float_vectors(file, self.positions)
self.write_fortran_block_float_vectors(file, self.velocities)
if self.ids_are_long:
self.write_fortran_block_ulongs(file, self.ids)
else:
self.write_fortran_block_uints(file, self.ids)
if not self.masses is None:
self.write_fortran_block_floats(file, self.masses)
if not self.u is None:
self.write_fortran_block_floats(file, self.u)
if self.is_initial_conditions_format:
return
if number_of_gas_particles > 0:
self.density = self.sets_to_save[0].rho.value_in(nbody_system.density)
self.hsml = self.sets_to_save[0].h_smooth.value_in(nbody_system.length)
else:
self.density = None
self.hsml = None
if self.has_potential_energy:
self.pot = self._pack_sets('potential_energy', nbody_system.energy)
else:
self.pot = None
if self.has_acceleration:
self.acc = self._pack_sets('acceleration', nbody_system.acceleration)
else:
self.acc = None
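        # Note: both branches of the following check leave da_dt as None, so no
        # rate-of-entropy-production block is written below, even when the
        # has_rate_of_entropy_production flag is set.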
if self.has_rate_of_entropy_production:
self.da_dt = None
else:
self.da_dt = None
if self.has_timestep:
self.dt = self._pack_sets('timestep', nbody_system.time)
else:
self.dt = None
if not self.density is None:
self.write_fortran_block_floats(file, self.density)
if not self.hsml is None:
self.write_fortran_block_floats(file, self.hsml)
if not self.pot is None:
self.write_fortran_block_floats(file, self.pot)
if not self.acc is None:
self.write_fortran_block_floats(file, self.acc)
if not self.da_dt is None:
self.write_fortran_block_floats(file, self.da_dt)
if not self.dt is None:
self.write_fortran_block_floats(file, self.dt)
|
docs/patch.py | gselzer/magicgui | 185 | 11138623 | from pathlib import Path
from sphinx_jupyterbook_latex import transforms
text = Path(transforms.__file__).read_text()
text = text.replace('tocnode.attributes["hidden"]', 'tocnode.attributes.get("hidden")')
Path(transforms.__file__).write_text(text)
|
insights/parsers/crypto_policies.py | lhuett/insights-core | 121 | 11138624 | """
crypto-policies - files in ``/etc/crypto-policies/back-ends/``
==============================================================
This is a collection of parsers that all deal with the generated configuration
files under the ``/etc/crypto-policies/back-ends/`` folder. Parsers included
in this module are:
CryptoPoliciesConfig - file ``/etc/crypto-policies/config``
-----------------------------------------------------------
CryptoPoliciesStateCurrent - file ``/etc/crypto-policies/state/current``
------------------------------------------------------------------------
CryptoPoliciesOpensshserver - file ``/etc/crypto-policies/back-ends/opensshserver.config``
------------------------------------------------------------------------------------------
CryptoPoliciesBind - file ``/etc/crypto-policies/back-ends/bind.config``
------------------------------------------------------------------------
"""
from insights import Parser, parser, SysconfigOptions
from insights.specs import Specs
from insights.parsers import SkipException, get_active_lines
@parser(Specs.crypto_policies_config)
class CryptoPoliciesConfig(Parser):
"""
This parser reads the ``/etc/crypto-policies/config`` file.
The contents of the file is a single-line value, available
in the ``value`` property.
Sample Input::
LEGACY
Examples:
>>> cp_c.value
'LEGACY'
"""
def parse_content(self, content):
if not content:
raise SkipException("/etc/crypto-policies/config is empty")
self.value = get_active_lines(content)[0]
@parser(Specs.crypto_policies_state_current)
class CryptoPoliciesStateCurrent(Parser):
"""
This parser reads the ``/etc/crypto-policies/state/current`` file.
The contents of the file is a single-line value, available
in the ``value`` property.
Sample Input::
LEGACY
Examples:
>>> cp_sc.value
'LEGACY'
"""
def parse_content(self, content):
if not content:
raise SkipException("/etc/crypto-policies/state/current is empty")
self.value = get_active_lines(content)[0]
@parser(Specs.crypto_policies_opensshserver)
class CryptoPoliciesOpensshserver(SysconfigOptions):
"""
This parser reads the ``/etc/crypto-policies/back-ends/opensshserver.config``
file. It uses the ``SysconfigOptions`` parser class to convert the file into
a dictionary of options. It also provides the ``options`` property as a helper
to retrieve the ``CRYPTO_POLICY`` variable.
Sample Input::
        CRYPTO_POLICY='-oCiphers=aes256-gcm@openssh.com,3des-cbc -oMACs=umac-128-etm@openssh.com'
Examples:
>>> 'CRYPTO_POLICY' in cp_os
True
>>> cp_os.options
        '-oCiphers=aes256-gcm@openssh.com,3des-cbc -oMACs=umac-128-etm@openssh.com'
"""
@property
def options(self):
""" (union[str, None]): The value of the ``CRYPTO_POLICY`` variable if it exists, else None."""
return self.data.get('CRYPTO_POLICY', None)
@parser(Specs.crypto_policies_bind)
class CryptoPoliciesBind(Parser):
"""
This parser reads the ``/etc/crypto-policies/back-ends/bind.config`` file.
The sections ``disable-algorithms`` and ``disable-ds-digests`` are in the
properties ``disable_algorithms`` and ``disable_ds_digests``.
Sample Input::
disable-algorithms "." {
RSAMD5;
DSA;
};
disable-ds-digests "." {
GOST;
};
Examples:
>>> 'GOST' in cp_bind.disable_ds_digests
True
>>> cp_bind.disable_algorithms
['RSAMD5', 'DSA']
"""
def parse_content(self, content):
if not content:
raise SkipException("/etc/crypto-policies/back-ends/bind.config is empty")
self.value = content
in_da = False
in_ddd = False
da = []
ddd = []
for line in self.value:
if line.strip().lower().startswith("disable-algorithms"):
in_da = True
continue
if line.strip().lower().startswith("disable-ds-digests"):
in_ddd = True
continue
if line.strip().startswith("}"):
in_da = False
in_ddd = False
continue
algo = line.strip().strip(''';'"''')
if in_da:
da.append(algo)
if in_ddd:
ddd.append(algo)
self.disable_algorithms = da
self.disable_ds_digests = ddd
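# --- Hedged illustration (not part of insights-core): the brace-section scan used by
# --- CryptoPoliciesBind.parse_content above, replayed on an inline sample so the
# --- expected disable_algorithms / disable_ds_digests output is easy to verify.
if __name__ == "__main__":
    sample = [
        'disable-algorithms "." {',
        '    RSAMD5;',
        '    DSA;',
        '};',
        'disable-ds-digests "." {',
        '    GOST;',
        '};',
    ]
    in_da = in_ddd = False
    da, ddd = [], []
    for line in sample:
        stripped = line.strip()
        if stripped.lower().startswith("disable-algorithms"):
            in_da = True
            continue
        if stripped.lower().startswith("disable-ds-digests"):
            in_ddd = True
            continue
        if stripped.startswith("}"):
            in_da = in_ddd = False
            continue
        algo = stripped.strip(''';'"''')
        if in_da:
            da.append(algo)
        if in_ddd:
            ddd.append(algo)
    assert da == ["RSAMD5", "DSA"] and ddd == ["GOST"]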
|
lib/pytube/request.py | Rinka433/Fxc7-Api | 119 | 11138646 | # -*- coding: utf-8 -*-
"""Implements a simple wrapper around urlopen."""
import logging
from functools import lru_cache
from http.client import HTTPResponse
from typing import Iterable, Dict, Optional
from urllib.request import Request
from urllib.request import urlopen
logger = logging.getLogger(__name__)
def _execute_request(
url: str, method: Optional[str] = None, headers: Optional[Dict[str, str]] = None
) -> HTTPResponse:
base_headers = {"User-Agent": "Mozilla/5.0"}
if headers:
base_headers.update(headers)
if url.lower().startswith("http"):
request = Request(url, headers=base_headers, method=method)
else:
raise ValueError("Invalid URL")
return urlopen(request) # nosec
def get(url) -> str:
"""Send an http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: str
:returns:
UTF-8 encoded string of response
"""
return _execute_request(url).read().decode("utf-8")
def stream(
url: str, chunk_size: int = 4096, range_size: int = 9437184
) -> Iterable[bytes]:
"""Read the response in chunks.
:param str url: The URL to perform the GET request for.
:param int chunk_size: The size in bytes of each chunk. Defaults to 4KB
:param int range_size: The size in bytes of each range request. Defaults to 9MB
:rtype: Iterable[bytes]
"""
file_size: int = range_size # fake filesize to start
downloaded = 0
while downloaded < file_size:
stop_pos = min(downloaded + range_size, file_size) - 1
range_header = f"bytes={downloaded}-{stop_pos}"
response = _execute_request(url, method="GET", headers={"Range": range_header})
if file_size == range_size:
try:
content_range = response.info()["Content-Range"]
file_size = int(content_range.split("/")[1])
except (KeyError, IndexError, ValueError) as e:
logger.error(e)
while True:
chunk = response.read(chunk_size)
if not chunk:
break
downloaded += len(chunk)
yield chunk
return # pylint: disable=R1711
@lru_cache(maxsize=None)
def filesize(url: str) -> int:
"""Fetch size in bytes of file at given URL
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
return int(head(url)["content-length"])
def head(url: str) -> Dict:
"""Fetch headers returned http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: dict
:returns:
dictionary of lowercase headers
"""
response_headers = _execute_request(url, method="HEAD").info()
return {k.lower(): v for k, v in response_headers.items()}
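# --- Hedged usage sketch (illustrative, not part of pytube): how the helpers above
# --- compose for a chunked download. The URL and output filename are placeholder
# --- assumptions; any directly downloadable HTTP(S) URL would do.
if __name__ == "__main__":
    example_url = "https://example.com/video.mp4"  # hypothetical URL
    expected = filesize(example_url)  # one HEAD request, memoized by lru_cache
    written = 0
    with open("download.bin", "wb") as fh:
        for chunk in stream(example_url, chunk_size=8192):
            fh.write(chunk)
            written += len(chunk)
    print("wrote %d of %d bytes" % (written, expected))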
|
evan69/0004/0004.py | saurabh896/python-1 | 3,976 | 11138667 | <filename>evan69/0004/0004.py
import collections,re
import sys
def cal(filename = 'in.txt'):
print 'now processing:' + filename + '......'
f = open(filename,'r')
data = f.read()
dic = collections.defaultdict(lambda :0)
data = re.sub(r'[\W\d]',' ',data)
data = data.lower()
datalist = data.split(' ')
for item in datalist:
dic[item] += 1
del dic['']
return dic
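# Note (illustrative alternative): collections.Counter performs the same aggregation
# in one call, e.g. collections.Counter(re.sub(r'[\W\d]', ' ', data).lower().split()).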
try:
print sorted(cal().items())
except:
print 'no input file'
|
aiida/repository/common.py | azadoks/aiida-core | 180 | 11138674 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module with resources common to the repository."""
import enum
import typing
__all__ = ('FileType', 'File')
class FileType(enum.Enum):
"""Enumeration to represent the type of a file object."""
DIRECTORY = 0
FILE = 1
class File():
"""Data class representing a file object."""
def __init__(
self,
name: str = '',
file_type: FileType = FileType.DIRECTORY,
key: typing.Union[str, None] = None,
objects: typing.Dict[str, 'File'] = None
) -> None:
"""Construct a new instance.
:param name: The final element of the file path
:param file_type: Identifies whether the File is a file or a directory
:param key: A key to map the file to its contents in the backend repository (file only)
:param objects: Mapping of child names to child Files (directory only)
:raises ValueError: If a key is defined for a directory,
or objects are defined for a file
"""
if not isinstance(name, str):
raise TypeError('name should be a string.')
if not isinstance(file_type, FileType):
raise TypeError('file_type should be an instance of `FileType`.')
if key is not None and not isinstance(key, str):
raise TypeError('key should be `None` or a string.')
if objects is not None and any(not isinstance(obj, self.__class__) for obj in objects.values()):
raise TypeError('objects should be `None` or a dictionary of `File` instances.')
if file_type == FileType.DIRECTORY and key is not None:
raise ValueError('an object of type `FileType.DIRECTORY` cannot define a key.')
if file_type == FileType.FILE and objects is not None:
raise ValueError('an object of type `FileType.FILE` cannot define any objects.')
self._name = name
self._file_type = file_type
self._key = key
self._objects = objects or {}
@classmethod
def from_serialized(cls, serialized: dict, name='') -> 'File':
"""Construct a new instance from a serialized instance.
:param serialized: the serialized instance.
:return: the reconstructed file object.
"""
if 'k' in serialized:
file_type = FileType.FILE
key = serialized['k']
objects = None
else:
file_type = FileType.DIRECTORY
key = None
objects = {name: File.from_serialized(obj, name) for name, obj in serialized.get('o', {}).items()}
instance = cls.__new__(cls)
instance.__init__(name, file_type, key, objects)
return instance
def serialize(self) -> dict:
"""Serialize the metadata into a JSON-serializable format.
.. note:: the serialization format is optimized to reduce the size in bytes.
:return: dictionary with the content metadata.
"""
if self.file_type == FileType.DIRECTORY:
if self.objects:
return {'o': {key: obj.serialize() for key, obj in self.objects.items()}}
return {}
return {'k': self.key}
@property
def name(self) -> str:
"""Return the name of the file object."""
return self._name
@property
def file_type(self) -> FileType:
"""Return the file type of the file object."""
return self._file_type
def is_file(self) -> bool:
"""Return whether this instance is a file object."""
return self.file_type == FileType.FILE
def is_dir(self) -> bool:
"""Return whether this instance is a directory object."""
return self.file_type == FileType.DIRECTORY
@property
def key(self) -> typing.Union[str, None]:
"""Return the key of the file object."""
return self._key
@property
def objects(self) -> typing.Dict[str, 'File']:
"""Return the objects of the file object."""
return self._objects
def __eq__(self, other) -> bool:
"""Return whether this instance is equal to another file object instance."""
if not isinstance(other, self.__class__):
return False
equal_attributes = all(getattr(self, key) == getattr(other, key) for key in ['name', 'file_type', 'key'])
equal_object_keys = sorted(self.objects) == sorted(other.objects)
equal_objects = equal_object_keys and all(obj == other.objects[key] for key, obj in self.objects.items())
return equal_attributes and equal_objects
def __repr__(self):
args = (self.name, self.file_type.value, self.key, self.objects.items())
return 'File<name={}, file_type={}, key={}, objects={}>'.format(*args)
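# --- Hedged illustration (not part of aiida-core): build a small hierarchy and
# --- round-trip it through serialize() / from_serialized(). The names and the key
# --- 'abc123' are made up for the example.
if __name__ == '__main__':
    tree = File(objects={
        'data': File(name='data', objects={
            'values.dat': File(name='values.dat', file_type=FileType.FILE, key='abc123'),
        }),
    })
    dumped = tree.serialize()  # {'o': {'data': {'o': {'values.dat': {'k': 'abc123'}}}}}
    assert File.from_serialized(dumped) == tree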
|
dbaas/logical/models.py | globocom/database-as-a-service | 303 | 11138678 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import simple_audit
import logging
import datetime
from datetime import date, timedelta
from django.db import models, transaction, Error
from django.db.models.signals import pre_save, post_save, pre_delete
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields.encrypted import EncryptedCharField
from util import slugify, make_db_random_password
from util.models import BaseModel
from physical.models import DatabaseInfra, Environment
from drivers import factory_for
from system.models import Configuration
from account.models import Team
from drivers.base import DatabaseStatus
from drivers.errors import ConnectionError
from logical.validators import database_name_evironment_constraint
from notification.models import TaskHistory
LOG = logging.getLogger(__name__)
KB_FACTOR = 1.0 / 1024.0
MB_FACTOR = 1.0 / 1024.0 / 1024.0
GB_FACTOR = 1.0 / 1024.0 / 1024.0 / 1024.0
class Project(BaseModel):
name = models.CharField(
verbose_name=_("Project name"), max_length=100, unique=True)
description = models.TextField(
verbose_name=_("Description"), null=True, blank=True)
is_active = models.BooleanField(
verbose_name=_("Is project active"), default=True)
slug = models.SlugField()
def __unicode__(self):
return "%s" % self.name
class Meta:
permissions = (
("view_project", "Can view projects"),
)
ordering = ['name']
class DatabaseAliveManager(models.Manager):
def get_query_set(self):
return Database.objects.filter(is_in_quarantine=False)
class DatabaseHistory(models.Model):
database_id = models.IntegerField(db_index=True)
environment = models.CharField(
verbose_name=_("environment"), max_length=20
)
engine = models.CharField(
verbose_name=_("engine"), max_length=100
)
name = models.CharField(
verbose_name=_("name"), max_length=200
)
project = models.CharField(
verbose_name=_("project"), max_length=100
)
team = models.CharField(
verbose_name=_("team"), max_length=100
)
databaseinfra_name = models.CharField(
verbose_name=_("databaseinfra_name"), max_length=100
)
plan = models.CharField(
verbose_name=_("plan"), max_length=100
)
disk_size_kb = models.PositiveIntegerField(verbose_name=_("Size KB"))
has_persistence = models.BooleanField(
verbose_name="Disk persistence", default=True
)
created_at = models.DateTimeField(
verbose_name=_("created_at"))
deleted_at = models.DateTimeField(
verbose_name=_("deleted_at"), auto_now_add=True)
description = models.TextField(
verbose_name=_("Description"), null=True, blank=True
)
class Database(BaseModel):
DEAD = 0
ALIVE = 1
INITIALIZING = 2
ALERT = 3
DB_STATUS = (
(DEAD, 'Dead'),
(ALIVE, 'Alive'),
(INITIALIZING, 'Initializing'),
(ALERT, 'Alert')
)
name = models.CharField(
verbose_name=_("Database name"), max_length=100, db_index=True
)
databaseinfra = models.ForeignKey(
DatabaseInfra, related_name="databases", on_delete=models.PROTECT
)
project = models.ForeignKey(
Project, related_name="databases", on_delete=models.PROTECT, null=True,
blank=True
)
team = models.ForeignKey(
Team, related_name="databases", null=True, blank=True,
help_text=_("Team that is accountable for the database")
)
is_in_quarantine = models.BooleanField(
verbose_name=_("Is database in quarantine?"), default=False
)
quarantine_dt = models.DateField(
verbose_name=_("Quarantine date"), null=True, blank=True,
editable=False
)
description = models.TextField(
verbose_name=_("Description"), null=True, blank=True
)
status = models.IntegerField(choices=DB_STATUS, default=2)
used_size_in_bytes = models.FloatField(default=0.0)
environment = models.ForeignKey(
Environment, related_name="databases", on_delete=models.PROTECT,
db_index=True
)
backup_path = models.CharField(
verbose_name=_("Backup path"), max_length=300, null=True, blank=True,
help_text=_("Full path to backup file")
)
subscribe_to_email_events = models.BooleanField(
verbose_name=_("Subscribe to email events"), default=True,
help_text=_(
"Check this box if you'd like to receive information "
"regarding this database by email."
)
)
disk_auto_resize = models.BooleanField(
verbose_name=_("Disk auto resize"), default=True,
help_text=_("When marked, the disk will be resized automatically.")
)
is_protected = models.BooleanField(
verbose_name=_("Protected"), default=False,
help_text=_("When marked, the database can not be deleted.")
)
quarantine_user = models.ForeignKey(
User, related_name='databases_quarantine',
null=True, blank=True, editable=False
)
def validate_unique(self, *args, **kwargs):
        ''' Validate that the database name is unique
        within the environment stage'''
super(Database, self).validate_unique(*args, **kwargs)
if not any([
hasattr(self, "environment"),
hasattr(self, "name")]) or self.id:
return
environment = Environment.objects.filter(pk=self.environment_id)
if not environment.exists():
return
environment = environment[0]
db_check = Database.objects.filter(
name=self.name,
environment__stage=environment.stage
)
if db_check.exists():
            raise ValidationError({
                "name": [
                    "Name %s is already in use in the %s environment" % (
self.name,
Environment.get_stage_by_id(self.environment.stage)
)
]
})
def team_contact(self):
if self.team:
return self.team.emergency_contacts
team_contact.short_description = 'Emergency contacts'
objects = models.Manager()
alive = DatabaseAliveManager()
quarantine_time = Configuration.get_by_name_as_int(
'quarantine_retention_days'
)
def __unicode__(self):
return u"{}".format(self.name)
class Meta:
permissions = (
("can_manage_quarantine_databases",
"Can manage databases in quarantine"),
("view_database", "Can view databases"),
("upgrade_mongo24_to_30",
"Can upgrade mongoDB version from 2.4 to 3.0"),
("upgrade_database", "Can upgrade databases"),
("configure_ssl", "Can configure SSL"),
)
unique_together = (
('name', 'environment'),
)
ordering = ('name', )
@property
def is_in_memory(self):
return self.engine.engine_type.is_in_memory
@property
def has_persistence(self):
return self.plan.has_persistence
@property
def has_persistense_equivalent_plan(self):
if self.plan.persistense_equivalent_plan:
return True
return False
@property
def persistence_change_text(self):
if self.has_persistence:
return 'Change to Memory Only'
return 'Change to Persisted'
@property
def infra(self):
return self.databaseinfra
@property
def engine_type(self):
return self.infra.engine_name
@property
def engine(self):
return self.infra.engine
@property
def plan(self):
return self.databaseinfra and self.databaseinfra.plan
def pin_task(self, task):
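        # Descriptive note: DatabaseLock.database is declared unique, so save()
        # acts as an atomic test-and-set: a second task trying to pin the same
        # database violates the constraint, the error is caught below and the
        # database is reported as busy by returning False.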
try:
with transaction.atomic():
DatabaseLock(database=self, task=task).save()
except Error:
return False
else:
return True
@staticmethod
def __clean_task(task_name):
if task_name.endswith('_rollback'):
return task_name.rsplit('_rollback', 1)[0]
if task_name.endswith('_retry'):
return task_name.rsplit('_retry', 1)[0]
return task_name
def update_task(self, task):
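        # Descriptive note: a retry/rollback task may take over an existing lock,
        # but only when the held task has the same base name (the _retry/_rollback
        # suffixes are stripped by __clean_task) and that previous run ended in
        # error; otherwise the lock is left untouched and False is returned.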
lock = self.lock.first()
if not lock:
return self.pin_task(task)
with transaction.atomic():
lock = DatabaseLock.objects.select_for_update().filter(
database=self
).first()
task_name = self.__clean_task(task.task_name)
lock_task_name = self.__clean_task(lock.task.task_name)
if lock_task_name != task_name or not lock.task.is_status_error:
return False
lock.task = task
lock.save()
return True
def finish_task(self):
for instance in self.infra.instances.all():
try:
instance.update_status()
except Exception as e:
LOG.error(
"Could not refresh status for {} - {}".format(instance, e)
)
continue
try:
self.update_status()
except Exception as e:
LOG.error("Could not refresh status for {} - {}".format(self, e))
self.unpin_task()
def update_status(self):
self.status = Database.DEAD
if self.database_status and self.database_status.is_alive:
self.status = Database.ALIVE
instances_status = self.databaseinfra.check_instances_status()
if instances_status == self.databaseinfra.ALERT:
self.status = Database.ALERT
self.save(update_fields=['status'])
def unpin_task(self):
DatabaseLock.objects.filter(database=self).delete()
@property
def current_locked_task(self):
lock = self.lock.first()
if lock:
return lock.task
def delete(self, *args, **kwargs):
if self.is_in_quarantine:
LOG.warning(
"Database {} is in quarantine and will be removed".format(
self.name
)
)
for credential in self.credentials.all():
instance = factory_for(self.databaseinfra)
instance.try_remove_user(credential)
engine = self.databaseinfra.engine
databaseinfra = self.databaseinfra
try:
DatabaseHistory.objects.create(
database_id=self.id,
name=self.name,
description=self.description,
engine='{} {}'.format(
engine.engine_type.name,
engine.version
),
project=self.project.name if self.project else '',
team=self.team.name if self.team else '',
databaseinfra_name=databaseinfra.name,
plan=databaseinfra.plan.name,
disk_size_kb=databaseinfra.disk_offering.size_kb,
has_persistence=databaseinfra.plan.has_persistence,
environment=self.environment.name,
created_at=self.created_at
)
except Exception as err:
            LOG.error(
                ('Error on creating database history for '
                 'database {}: {}'.format(self.id, err)))
super(Database, self).delete(*args, **kwargs)
else:
LOG.warning("Putting database {} in quarantine".format(self.name))
self.is_in_quarantine = True
self.is_protected = False
self.save()
if self.credentials.exists():
for credential in self.credentials.all():
                    new_password = make_db_random_password()
                    new_credential = Credential.objects.get(pk=credential.id)
                    new_credential.password = new_password
new_credential.save()
instance = factory_for(self.databaseinfra)
instance.try_update_user(new_credential)
def clean(self):
if not self.pk:
self.name = slugify(self.name)
if self.name in self.__get_database_reserved_names():
raise ValidationError(
_("{} is a reserved database name".format(
self.name
))
)
def automatic_create_first_credential(self):
LOG.info("creating new credential for database {}".format(self.name))
user = Credential.USER_PATTERN % self.name
credential = Credential.create_new_credential(user, self)
return credential
@classmethod
def provision(cls, name, databaseinfra):
if not isinstance(databaseinfra, DatabaseInfra):
raise ValidationError(
'Invalid databaseinfra type {} - {}'.format(
type(databaseinfra), databaseinfra
)
)
database = Database()
database.databaseinfra = databaseinfra
database.environment = databaseinfra.environment
database.name = name
database.full_clean()
database.save()
database = Database.objects.get(pk=database.pk)
return database
def __get_database_reserved_names(self):
return getattr(self.driver, 'RESERVED_DATABASES_NAME', [])
@property
def driver(self):
if self.databaseinfra_id is not None:
return self.databaseinfra.get_driver()
def get_endpoint(self):
return self.driver.get_connection(database=self)
def get_endpoint_dns(self):
return self.driver.get_connection_dns(database=self)
def get_endpoint_dns_simple(self):
return self.driver.get_connection_dns_simple(database=self)
def __graylog_url(self):
from util import get_credentials_for
from dbaas_credentials.models import CredentialType
if self.databaseinfra.plan.is_pre_provisioned:
return ""
credential = get_credentials_for(
environment=self.environment,
credential_type=CredentialType.GRAYLOG
)
stream = credential.get_parameter_by_name(
'stream_{}'.format(self.plan.engine.engine_type.name)
)
search_field = credential.get_parameter_by_name('search_field')
if not stream or not search_field:
return ""
return "{}/streams/{}/search?q={}:{}".format(
credential.endpoint, stream, search_field, self.name
)
def __kibana_url(self):
from util import get_credentials_for
from dbaas_credentials.models import CredentialType
if self.databaseinfra.plan.is_pre_provisioned:
return ""
credential = get_credentials_for(
environment=self.environment,
credential_type=CredentialType.KIBANA_LOG
)
search_field = credential.get_parameter_by_name('search_field')
if not search_field:
return ""
time_query = "_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-6h,to:now))"
filter_query = "_a=(columns:!(_source),filters:!(),interval:auto,query:(language:lucene,query:'{}:{}'))".format(
search_field, self.name
)
return "{}/app/kibana#/discover?{}&{}".format(
credential.endpoint, time_query, filter_query
)
def get_log_url(self):
if Configuration.get_by_name_as_int('graylog_integration') == 1:
return self.__graylog_url()
if Configuration.get_by_name_as_int('kibana_integration') == 1:
return self.__kibana_url()
def get_dex_url(self):
if Configuration.get_by_name_as_int('dex_analyze') != 1:
return ""
if self.databaseinfra.plan.is_pre_provisioned:
return ""
if self.engine_type != 'mongodb':
return ""
return 1
def get_is_preprovisioned(self):
return self.databaseinfra.plan.is_pre_provisioned
endpoint = property(get_endpoint)
endpoint_dns = property(get_endpoint_dns)
@cached_property
def database_status(self):
try:
info = self.databaseinfra.get_info()
if info is None:
return None
database_status = info.get_database_status(self.name)
if database_status is None:
# try get without cache
info = self.databaseinfra.get_info(force_refresh=True)
database_status = info.get_database_status(self.name)
except ConnectionError as e:
msg = ("ConnectionError calling database_status for database {}:"
"{}").format(self, e)
LOG.error(msg)
database_status = DatabaseStatus(self)
return database_status
def get_offering_name(self):
LOG.info("Get offering")
try:
offer_name = self.infra.offering.name
except Exception as e:
LOG.info("Oops...{}".format(e))
offer_name = None
return offer_name
offering = property(get_offering_name)
@property
def total_size(self):
return self.driver.masters_total_size_in_bytes
@property
def total_size_in_kb(self):
return round(self.driver.masters_total_size_in_bytes * KB_FACTOR, 2)
@property
def total_size_in_mb(self):
return round(self.driver.masters_total_size_in_bytes * MB_FACTOR, 2)
@property
def total_size_in_gb(self):
return round(self.driver.masters_total_size_in_bytes * GB_FACTOR, 2)
@property
def used_size_in_kb(self):
return self.driver.masters_used_size_in_bytes * KB_FACTOR
@property
def used_size_in_mb(self):
return self.driver.masters_used_size_in_bytes * MB_FACTOR
@property
def used_size_in_gb(self):
return self.driver.masters_used_size_in_bytes * GB_FACTOR
@property
def capacity(self):
if self.status:
            return round(
                (1.0 * self.used_size_in_bytes / self.total_size)
                if self.total_size else 0, 2)
@classmethod
def purge_quarantine(self):
quarantine_time = Configuration.get_by_name_as_int(
'quarantine_retention_days')
quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
databases = Database.objects.filter(
is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt
)
for database in databases:
database.delete()
            LOG.info(
                ("The database %s was deleted, because it was set to "
                 "quarantine %d days ago") % (database.name, quarantine_time)
)
@classmethod
def clone(cls, database, clone_name, plan, environment, user):
from notification.tasks import TaskRegister
TaskRegister.database_clone(
origin_database=database, clone_name=clone_name, plan=plan,
environment=environment, user=user
)
@classmethod
def restore(cls, database, snapshot, user):
from notification.tasks import TaskRegister
LOG.info(
("Changing database volume with params: "
"database {} snapshot: {}, user: {}").format(
database, snapshot, user
)
)
TaskRegister.restore_snapshot(
database=database, snapshot=snapshot, user=user
)
@classmethod
def resize(cls, database, offering, user):
from notification.tasks import TaskRegister
TaskRegister.database_resize(
database=database, user=user,
offering=offering
)
# @classmethod
# def recover_snapshot(cls, database, snapshot, user, task_history):
# from backup.tasks import restore_snapshot
#
# restore_snapshot.delay(
# database=database, snapshot=snapshot, user=user,
# task_history=task_history
# )
def get_metrics_url(self):
return "/admin/logical/database/{}/metrics/".format(self.id)
def get_resize_retry_url(self):
return "/admin/logical/database/{}/resize_retry/".format(self.id)
def get_resize_rollback_url(self):
return "/admin/logical/database/{}/resize_rollback/".format(self.id)
def get_disk_resize_url(self):
return "/admin/logical/database/{}/disk_resize/".format(self.id)
def get_add_instances_database_retry_url(self):
return "/admin/logical/database/{}/add_instances_database_retry/".format(self.id)
def get_add_instances_database_rollback_url(self):
return "/admin/logical/database/{}/add_instances_database_rollback/".format(self.id)
def get_remove_instance_database_retry_url(self):
return "/admin/logical/database/{}/remove_instance_database_retry/".format(self.id)
def get_mongodb_engine_version_upgrade_url(self):
return ("/admin/logical/database/{}/"
"mongodb_engine_version_upgrade/").format(self.id)
def get_upgrade_url(self):
return "/admin/logical/database/{}/upgrade/".format(self.id)
def get_upgrade_retry_url(self):
return "/admin/logical/database/{}/upgrade_retry/".format(self.id)
def get_migrate_engine_retry_url(self):
return "/admin/logical/database/{}/migrate_engine_retry/".format(self.id)
def get_upgrade_patch_url(self):
return "/admin/logical/database/{}/upgrade_patch/".format(self.id)
def get_upgrade_patch_retry_url(self):
return "/admin/logical/database/{}/upgrade_patch_retry/".format(
self.id
)
def get_change_parameters_retry_url(self):
return "/admin/logical/database/{}/change_parameters_retry/".format(
self.id
)
def get_reinstallvm_retry_url(self):
return "/admin/logical/database/{}/reinstallvm_retry/".format(self.id)
def get_recreateslave_retry_url(self):
return "/admin/logical/database/{}/recreateslave_retry/".format(
self.id
)
def get_configure_ssl_url(self):
return "/admin/logical/database/{}/configure_ssl/".format(self.id)
def get_configure_ssl_retry_url(self):
return "/admin/logical/database/{}/configure_ssl_retry/".format(
self.id
)
def get_set_ssl_required_url(self):
return "/admin/logical/database/{}/set_ssl_required/".format(self.id)
def get_set_ssl_required_retry_url(self):
return "/admin/logical/database/{}/set_ssl_required_retry/".format(
self.id
)
def get_set_ssl_not_required_url(self):
return "/admin/logical/database/{}/set_ssl_not_required/".format(
self.id)
def get_set_ssl_not_required_retry_url(self):
return "/admin/logical/database/{}/set_ssl_not_required_retry/".format(
self.id
)
def get_change_persistence_url(self):
return "/admin/logical/database/{}/change_persistence/".format(self.id)
def get_change_persistence_retry_url(self):
return "/admin/logical/database/{}/change_persistence_retry/".format(
self.id
)
def is_mongodb_24(self):
engine = self.engine
if engine.name == 'mongodb' and engine.version.startswith('2.4'):
return True
return False
def get_offering_id(self):
LOG.info("Get offering")
try:
offer_id = self.infra.plan.stronger_offering.id
except Exception as e:
LOG.info("Oops...{}".format(e))
offer_id = None
return offer_id
offering_id = property(get_offering_id)
def is_being_used_elsewhere(self, skip_tasks=None):
tasks = TaskHistory.objects.filter(
task_status=TaskHistory.STATUS_WAITING,
object_id=self.id,
object_class=self._meta.db_table)
if tasks:
return True
if not self.current_locked_task:
return False
skip_tasks = skip_tasks or []
if self.current_locked_task.task_name in skip_tasks:
if self.current_locked_task.is_status_error:
return False
return True
def restore_allowed(self):
if Configuration.get_by_name_as_int('restore_allowed') == 1:
return True
return False
def has_offerings(self):
offerings = self.environment.offerings.exclude(id=self.offering_id)
return bool(offerings)
def has_disk_offerings(self):
from physical.models import DiskOffering
offerings = DiskOffering.objects.exclude(
id=self.databaseinfra.disk_offering.id
)
return bool(offerings)
@property
def can_modify_parameters(self):
if self.plan.replication_topology.parameter.all():
return True
else:
return False
@property
def is_host_migrate_available(self):
from util.providers import get_host_migrate_steps
class_path = self.plan.replication_topology.class_path
try:
get_host_migrate_steps(class_path)
except NotImplementedError:
return False
else:
return True
@property
def is_dead(self):
if self.status != Database.ALIVE:
return True
if self.database_status and not self.database_status.is_alive:
return True
return False
@classmethod
def disk_resize(cls, database, new_disk_offering, user):
from physical.models import DiskOffering
from notification.tasks import TaskRegister
disk_offering = DiskOffering.objects.get(id=new_disk_offering)
TaskRegister.database_disk_resize(
database=database, user=user, disk_offering=disk_offering
)
def update_host_disk_used_size(self, host_address, used_size_kb,
total_size_kb=None):
instance = self.databaseinfra.instances.filter(
address=host_address
).first()
if not instance:
raise ObjectDoesNotExist()
volume = instance.hostname.volumes.last()
if not volume:
return None
if total_size_kb:
volume.total_size_kb = total_size_kb
volume.used_size_kb = used_size_kb
volume.save()
return volume
def can_be_cloned(self, database_view_button=False):
if not self.plan.has_persistence:
            return False, "Database does not have persistence and cannot be cloned"
if self.is_being_used_elsewhere():
return False, "Database is being used by another task"
if self.is_in_quarantine:
return False, "Database in quarantine cannot be cloned"
if database_view_button:
if self.status != self.ALIVE:
return False, "Database is not alive and cannot be cloned"
else:
if self.is_dead:
return False, "Database is not alive and cannot be cloned"
return True, None
def can_be_restored(self):
if not self.restore_allowed():
return False, ('Restore is not allowed. Please, contact DBaaS '
'team for more information')
if self.is_in_quarantine:
return False, "Database in quarantine cannot be restored"
if self.status != self.ALIVE or self.is_dead:
return False, "Database is not alive and cannot be restored"
if self.is_being_used_elsewhere():
return False, ("Database is being used by another task, please "
"check your tasks")
return True, None
def can_be_deleted(self):
error = None
if self.is_protected and not self.is_in_quarantine:
error = "Database {} is protected and cannot be deleted"
# elif self.is_dead:
# error = "Database {} is not alive and cannot be deleted"
# elif self.is_being_used_elsewhere():
# error = "Database {} cannot be deleted because" \
# " it is in use by another task."
if error:
return False, error.format(self.name)
return True, None
def can_do_upgrade_retry(self):
error = None
if self.is_mongodb_24():
error = "MongoDB 2.4 cannot be upgraded by this task."
elif self.is_in_quarantine:
error = "Database in quarantine and cannot be upgraded."
elif self.is_being_used_elsewhere([('notification.tasks'
'.upgrade_database')]):
error = "Database cannot be upgraded because " \
"it is in use by another task."
elif not self.infra.plan.engine_equivalent_plan:
error = "Source plan do not has equivalent plan to upgrade."
if error:
return False, error
return True, None
def can_do_upgrade(self):
can_do_upgrade, error = self.can_do_upgrade_retry()
if can_do_upgrade:
if self.is_dead:
error = "Database is dead and cannot be upgraded."
elif self.is_being_used_elsewhere():
error = "Database cannot be upgraded because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_engine_migration(self, retry=False):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be upgraded."
elif self.is_being_used_elsewhere([('notification.tasks'
'.migrate_engine')]):
error = "Database engine cannot be migrated because " \
"it is in use by another task."
elif not retry and self.is_dead:
error = "Database is dead and cannot be upgraded."
elif not retry and self.is_being_used_elsewhere():
error = "Database engine cannot be migrated because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_upgrade_patch_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be upgraded."
elif self.is_being_used_elsewhere(
['notification.tasks.upgrade_database_patch']
):
error = "Database cannot be upgraded because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_upgrade_patch(self):
can_do_upgrade, error = self.can_do_upgrade_patch_retry()
if can_do_upgrade:
if self.is_dead:
error = "Database is dead and cannot be upgraded."
elif self.is_being_used_elsewhere():
error = "Database cannot be upgraded because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_resize_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be resized."
elif not self.has_offerings:
error = "There is no offerings for this database."
elif self.is_being_used_elsewhere(['notification.tasks.resize_database', 'notification.tasks.resize_database_rollback']):
error = "Database cannot be resized because" \
" it is in use by another task."
if error:
return False, error
return True, None
def can_do_resize(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be resized."
elif not self.has_offerings:
error = "There is no offerings for this database."
elif self.is_dead:
error = "Database is dead and cannot be resized."
elif self.is_being_used_elsewhere():
error = "Database cannot be resized because" \
" it is in use by another task."
if error:
return False, error
return True, None
def can_do_change_parameters_retry(self):
error = None
if self.is_in_quarantine:
error = ("Database in quarantine and cannot have the parameters "
"changed.")
elif self.is_being_used_elsewhere([('notification.tasks'
'.change_parameters_database')]):
error = "Database cannot have the parameters changed because" \
" it is in use by another task."
if error:
return False, error
return True, None
def can_do_change_parameters(self):
error = None
if self.is_in_quarantine:
error = ("Database in quarantine and cannot have the parameters "
"changed.")
elif self.is_dead:
error = "Database is dead and cannot have the parameters changed."
elif self.is_being_used_elsewhere():
error = "Database cannot have the parameters changed because" \
" it is in use by another task."
if error:
return False, error
return True, None
def can_migrate_host(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot have host migrate."
elif self.is_dead:
error = "Database is dead and cannot migrate host"
elif self.is_being_used_elsewhere():
error = ("Database cannot migrate host it is in use "
"by another task.")
if error:
return False, error
return True, None
def can_do_change_persistence_retry(self):
error = None
if self.is_in_quarantine:
error = ("Database in quarantine and cannot have the persistence "
"changed.")
elif self.is_being_used_elsewhere([('notification.tasks'
'.change_database_persistence')]):
error = "Database cannot have the persistence changed because" \
" it is in use by another task."
elif not self.has_persistense_equivalent_plan:
            error = "Database cannot have the persistence changed because" \
                    " it has no persistence equivalent plan"
if error:
return False, error
return True, None
def can_do_change_persistence(self):
error = None
if self.is_in_quarantine:
error = ("Database in quarantine and cannot have the persistence "
"changed.")
elif self.is_dead:
error = "Database is dead and cannot have the persistence changed."
elif self.is_being_used_elsewhere():
error = "Database cannot have the persistence changed because" \
" it is in use by another task."
elif not self.has_persistense_equivalent_plan:
            error = "Database cannot have the persistence changed because" \
                    " it has no persistence equivalent plan"
if error:
return False, error
return True, None
def can_do_disk_resize(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be resized."
elif self.is_being_used_elsewhere():
error = "Database cannot be resized because" \
" it is in use by another task."
elif not self.has_disk_offerings:
error = "There is no other disk offering for this database."
if error:
return False, error
return True, None
def can_do_configure_ssl_retry(self):
error = None
if self.is_in_quarantine:
            error = "Database in quarantine and cannot have SSL configured."
elif self.is_being_used_elsewhere([('notification.tasks'
'.configure_ssl_database')]):
            error = "Database cannot have SSL configured because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_configure_ssl(self):
can_do_configure_ssl, error = self.can_do_configure_ssl_retry()
if can_do_configure_ssl:
if self.is_dead:
                error = "Database is dead and cannot have SSL configured."
elif self.is_being_used_elsewhere():
                error = "Database cannot have SSL configured because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_set_ssl_required_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot have set SSL " \
"required."
elif self.is_being_used_elsewhere([('notification.tasks'
'.database_set_ssl_required')]):
error = "Database cannot have set SSL required " \
"because it is in use by another task."
if error:
return False, error
return True, None
def can_do_set_ssl_required(self):
can_do_set_ssl_required, error = self.can_do_set_ssl_required_retry()
if can_do_set_ssl_required:
if self.is_dead:
error = "Database is dead and cannot have set SSL required."
elif self.is_being_used_elsewhere():
error = "Database cannot have set SSL required " \
"because it is in use by another task."
if error:
return False, error
return True, None
def can_do_set_ssl_not_required_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot have set SSL not " \
"required."
elif self.is_being_used_elsewhere(
[('notification.tasks.database_set_ssl_not_required')]):
error = "Database cannot have set SSL not required " \
"because it is in use by another task."
if error:
return False, error
return True, None
def can_do_set_ssl_not_required(self):
can_do_ssl, error = self.can_do_set_ssl_not_required_retry()
if can_do_ssl:
if self.is_dead:
error = "Database is dead and cannot have set SSL not " \
"required."
elif self.is_being_used_elsewhere():
error = "Database cannot have set SSL not required " \
"because it is in use by another task."
if error:
return False, error
return True, None
def destroy(self, user):
if not self.is_in_quarantine:
self.delete()
return
if self.plan.provider != self.plan.CLOUDSTACK:
self.delete()
return
LOG.debug(
"call destroy_database - name={}, team={}, project={}, "
"user={}".format(self.name, self.team, self.project, user)
)
from notification.tasks import TaskRegister
TaskRegister.database_destroy(database=self, user=user)
return
@property
def last_successful_upgrade(self):
from maintenance.models import DatabaseUpgrade
return self.upgrades.filter(status=DatabaseUpgrade.SUCCESS).last()
@property
def status_html(self):
html_default = '<span class="label label-{}">{}</span>'
if self.status == Database.ALIVE:
status = html_default.format("success", "Alive")
elif self.status == Database.DEAD:
status = html_default.format("important", "Dead")
elif self.status == Database.ALERT:
status = html_default.format("warning", "Alert")
else:
status = html_default.format("info", "Initializing")
return format_html(status)
@property
def migrating_html(self):
html_default = ' <span class="label label-{}">{}</span>'
if self.infra.migration_in_progress:
status = html_default.format("info", "Migrating")
return format_html(status)
return ""
@property
def organization(self):
return self.team.organization
class DatabaseLock(BaseModel):
database = models.ForeignKey(
Database, related_name="lock", unique=True
)
task = models.ForeignKey(
TaskHistory, related_name="lock"
)
class Credential(BaseModel):
USER_PATTERN = "u_%s"
USER_MAXIMUM_LENGTH_NAME = 16
user = models.CharField(verbose_name=_("User name"), max_length=100)
password = EncryptedCharField(
        verbose_name=_("Password"), max_length=255)
database = models.ForeignKey(Database, related_name="credentials")
force_ssl = models.BooleanField(default=False)
OWNER = 'Owner'
READ_WRITE = 'Read-Write'
READ_ONLY = 'Read-Only'
PRIVILEGES_CHOICES = {
(OWNER, 'Owner'),
(READ_WRITE, 'Read-Write'),
(READ_ONLY, 'Read-Only'),
}
privileges = models.CharField(max_length=10, choices=PRIVILEGES_CHOICES,
default=OWNER)
def __unicode__(self):
return u"%s" % self.user
class Meta:
permissions = (
("view_credential", "Can view credentials"),
)
unique_together = (
('user', 'database'),
)
ordering = ('database', 'user',)
def clean(self):
if len(self.user) > self.USER_MAXIMUM_LENGTH_NAME:
raise ValidationError(_("%s is too long" % self.user))
@cached_property
def driver(self):
return self.database.databaseinfra.get_driver()
def reset_password(self):
""" Reset credential password to a new random password """
        self.password = make_db_random_password()
self.driver.update_user(self)
self.save()
@property
def ssl_swap_label(self):
if self.force_ssl:
return "Disable SSL"
else:
return "Enable SSL"
def swap_force_ssl(self):
if self.force_ssl:
self.force_ssl = False
self.driver.set_user_not_require_ssl(self)
self.save()
else:
self.force_ssl = True
self.driver.set_user_require_ssl(self)
self.save()
@classmethod
def create_new_credential(cls, user, database, privileges="Owner"):
credential = Credential()
credential.database = database
credential.user = user[:cls.USER_MAXIMUM_LENGTH_NAME]
credential.user = slugify(credential.user)
        credential.password = make_db_random_password()
credential.privileges = privileges
credential.full_clean()
credential.driver.create_user(credential)
credential.save()
return credential
def delete(self, *args, **kwargs):
self.driver.remove_user(self)
LOG.info('User removed from driver')
super(Credential, self).delete(*args, **kwargs)
#
# SIGNALS
#
@receiver(pre_delete, sender=Database)
def database_pre_delete(sender, **kwargs):
"""
database pre delete signal. Removes database from the engine
"""
database = kwargs.get("instance")
LOG.debug("database pre-delete triggered")
engine = factory_for(database.databaseinfra)
engine.try_remove_database(database)
@receiver(post_save, sender=Database, dispatch_uid="database_drive_credentials")
def database_post_save(sender, **kwargs):
"""
Database post save signal. Creates the database in the driver and
creates a new credential.
"""
database = kwargs.get("instance")
is_new = kwargs.get("created")
LOG.debug("database post-save triggered")
if is_new and database.engine_type != 'redis':
        LOG.info(
            ("a new database (%s) was created... "
"provision it in the engine" % (
database.name))
)
engine = factory_for(database.databaseinfra)
engine.create_database(database)
database.automatic_create_first_credential()
@receiver(pre_save, sender=Database)
def database_pre_save(sender, **kwargs):
from notification.tasks import TaskRegister
database = kwargs.get('instance')
if database.is_in_quarantine:
if database.quarantine_dt is None:
database.quarantine_dt = datetime.datetime.now().date()
if not database.quarantine_user:
from dbaas.middleware import UserMiddleware
database.quarantine_user = UserMiddleware.current_user()
else:
database.quarantine_dt = None
database.quarantine_user = None
if database.id:
saved_object = Database.objects.get(id=database.id)
if database.name != saved_object.name:
raise AttributeError(_("Attribute name cannot be edited"))
if database.team and saved_object.team:
if database.team.organization != saved_object.team.organization:
TaskRegister.update_organization_name_monitoring(
database=database,
organization_name=database.team.organization.name)
if saved_object.team.external:
TaskRegister.update_database_monitoring(
database=database,
hostgroup=(saved_object.team.organization
.grafana_hostgroup),
action='remove')
if database.team.external:
TaskRegister.update_database_monitoring(
database=database,
hostgroup=database.team.organization.grafana_hostgroup,
action='add')
else:
# new database
if database_name_evironment_constraint(
database.name, database.environment.name):
raise AttributeError(
_('%s already exists in production!') % database.name
)
LOG.debug("slugfying database's name for %s" % database.name)
database.name = slugify(database.name)
@receiver(pre_save, sender=Credential)
def credential_pre_save(sender, **kwargs):
credential = kwargs.get('instance')
if credential.id:
saved_object = Credential.objects.get(id=credential.id)
if credential.user != saved_object.user:
raise AttributeError(_("Attribute user cannot be edited"))
if credential.database != saved_object.database:
raise AttributeError(_("Attribute database cannot be edited"))
@receiver(pre_save, sender=Project)
def project_pre_save(sender, **kwargs):
instance = kwargs.get('instance')
instance.slug = slugify(instance.name)
class NoDatabaseInfraCapacity(Exception):
""" There isn't databaseinfra capable to support a new database
with this plan """
pass
simple_audit.register(Project, Database, Credential)
|
reid/loss/triplet.py | zhangxinyu-tj/PAST | 112 | 11138679 | <filename>reid/loss/triplet.py
from __future__ import absolute_import
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
class OnlineTripletLoss(nn.Module):
def __init__(self, margin=0):
super(OnlineTripletLoss, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, inputs, targets):
n = inputs.size(0)
# Compute pairwise distance, replace by the official when merged
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
dist = dist + dist.t()
dist.addmm_(1, -2, inputs, inputs.t())
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
# For each anchor, find the hardest positive and negative
mask = targets.expand(n, n).eq(targets.expand(n, n).t())
dist_ap, dist_an = [], []
for i in range(n):
dist_ap.append(dist[i][mask[i]].max())
dist_an.append(dist[i][mask[i] == 0].min())
dist_ap = torch.cat(dist_ap)
dist_an = torch.cat(dist_an)
# Compute ranking hinge loss
y = dist_an.data.new()
y.resize_as_(dist_an.data)
y.fill_(1)
y = Variable(y)
loss = self.ranking_loss(dist_an, dist_ap, y)
prec = (dist_an.data > dist_ap.data).sum() * 1. / y.size(0)
return loss, prec
class OfflineTripletLoss(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin=0.1):
super(OfflineTripletLoss, self).__init__()
self.margin = margin
def forward(self, inputs, size_average=True):
batchsize = inputs[0].size(0)
anchor = inputs[0][0:int(batchsize/3)]
positive = inputs[0][int(batchsize/3):int(batchsize*2/3)]
negative = inputs[0][int(batchsize*2/3):]
anchor = anchor.view(int(batchsize/3), -1)
positive = positive.view(int(batchsize/3), -1)
negative = negative.view(int(batchsize/3), -1)
distance_positive = (anchor - positive).pow(2).sum(1) # .pow(.5)
distance_negative = (anchor - negative).pow(2).sum(1) # .pow(.5)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
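# --- Hedged usage sketch (illustrative): a batch of 8 random embeddings from two
# --- identities fed through OnlineTripletLoss. Written against the pre-0.4,
# --- Variable-era PyTorch API this module targets; shapes and labels are made up.
if __name__ == '__main__':
    torch.manual_seed(0)
    feats = Variable(torch.randn(8, 128))  # 8 embeddings of dimension 128
    labels = Variable(torch.LongTensor([0, 0, 0, 0, 1, 1, 1, 1]))  # two identities
    criterion = OnlineTripletLoss(margin=0.3)
    loss, prec = criterion(feats, labels)
    print(loss, prec)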
|
cvxpy/expressions/constants/callback_param.py | hashstat/cvxpy | 3,285 | 11138688 | """
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.expressions.constants.parameter import Parameter
class CallbackParam(Parameter):
"""
A parameter whose value is obtained by evaluating a function.
"""
PARAM_COUNT = 0
def __init__(self, callback, shape=(), **kwargs) -> None:
self._callback = callback
super(CallbackParam, self).__init__(shape, **kwargs)
@property
def value(self):
"""Evaluate the callback to get the value.
"""
return self._validate_value(self._callback())
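# --- Hedged usage sketch (illustrative): a CallbackParam whose value is always the
# --- square of another Parameter, re-evaluated on every access. Assumes cvxpy is
# --- importable as usual; the names are made up.
if __name__ == "__main__":
    import cvxpy as cp
    x = cp.Parameter(nonneg=True)
    x_squared = CallbackParam(lambda: x.value ** 2, nonneg=True)
    x.value = 3.0
    print(x_squared.value)  # 9.0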
|
test/old_tests/_test_kv.py | syaiful6/aerospike-client-python | 105 | 11138692 | import unittest
import sys
import pytest
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except:
print("Please install aerospike python client.")
sys.exit(1)
config = {"hosts": [("127.0.0.1", 3000)]}
# count records
count = 0
def count_records(input_tuple):
global count
count += 1
def count_records_false(input_tuple):
global count
count += 1
return False
def digest_only(key):
return (key[0], key[1], None, key[3])
class KVTestCase(unittest.TestCase, TestBaseClass):
def setup_class(cls):
KVTestCase.hostlist, KVTestCase.user, KVTestCase.password = TestBaseClass.get_hosts(
)
def setUp(self):
config = {"hosts": KVTestCase.hostlist}
if KVTestCase.user is None and KVTestCase.password is None:
self.client = aerospike.client(config).connect()
else:
self.client = aerospike.client(config).connect(KVTestCase.user,
KVTestCase.password)
def tearDown(self):
self.client.close()
def test_1(self):
'''
Using a single key,
'''
global count
key = ("test", "unittest", "1")
# cleanup records
def remove_record(input_tuple):
key, _, _ = input_tuple
self.client.remove(key)
self.client.scan("test", "unittest").foreach(remove_record)
recIn = {
"i": 1234,
"s": "abcd",
"b": bytearray("efgh", "utf-8"),
"l": [1357, "aceg", bytearray("aceg", "utf-8"), [1, 3, 5, 7],
{"a": 1,
"c": 3,
"e": 5,
"g": 7}],
"m": {
"i": 2468,
"s": "bdfh",
"l": [2468, "bdfh", bytearray("bdfh", "utf-8")],
"m": {"b": 2,
"d": 4,
"f": 6,
"h": 8}
},
}
# create the record
rc = self.client.put(key, recIn)
self.assertEqual(rc, 0, 'wrong return code')
# ensure existence
(key, meta) = self.client.exists(key)
self.assertTrue(meta is not None)
# count records
count = 0
self.client.scan("test", "unittest").foreach(count_records)
assert count == 1
self.assertEqual(count, 1, 'set should have 1 record')
# read it
(key, meta, recOut) = self.client.get(key)
self.assertEqual(recIn, recOut, 'records do not match')
# create the record
rc = self.client.put(key, {"hello": "world"})
self.assertEqual(rc, 0, 'wrong return code')
# augmented record
recIn["hello"] = "world"
# read it
(key, meta, recOut) = self.client.get(key)
self.assertEqual(recIn, recOut, 'records do not match')
# remove it
rc = self.client.remove(key)
self.assertEqual(rc, 0, 'wrong return code')
# ensure not existent
try:
(key, meta) = self.client.exists(key)
"""
We are making the api backward compatible. In case of
RecordNotFound an exception will not be raised.
            Instead an OK response is returned with the
meta as None. This might change with further releases.
"""
assert meta is None
except e.RecordNotFound as exception:
assert exception.code == 2
# count records
count = 0
self.client.scan("test", "unittest").foreach(count_records)
self.assertEqual(count, 0, 'set should be empty')
def test_2(self):
'''
Using a single key, with digest only.
'''
global count
key = ("test", "unittest", "1")
# cleanup records
def each_record(input_tuple):
key, _, _ = input_tuple
self.client.remove(key)
self.client.scan("test", "unittest").foreach(each_record)
recIn = {
"i": 1234,
"s": "abcd",
"b": bytearray("efgh", "utf-8"),
"l": [1357, "aceg", bytearray("aceg", "utf-8"), [1, 3, 5, 7],
{"a": 1,
"c": 3,
"e": 5,
"g": 7}],
"m": {
"i": 2468,
"s": "bdfh",
"l": [2468, "bdfh", bytearray("bdfh", "utf-8")],
"m": {"b": 2,
"d": 4,
"f": 6,
"h": 8}
},
'a': {u'aa': u'11'},
'k': {u'kk': u'22'}
}
# create the record
rc = self.client.put(key, recIn)
self.assertEqual(rc, 0, 'wrong return code')
# ensure existence
(key, meta) = self.client.exists(key)
self.assertTrue(meta is not None)
# count records
count = 0
self.client.scan("test", "unittest").foreach(count_records)
self.assertEqual(count, 1, 'set should have 1 record')
# read it
(key, meta, recOut) = self.client.get(digest_only(key))
self.assertEqual(recIn, recOut, 'records do not match')
# create the record
rc = self.client.put(digest_only(key), {"hello": "world"})
self.assertEqual(rc, 0, 'wrong return code')
# augmented record
recIn["hello"] = "world"
# read it
(key, meta, recOut) = self.client.get(digest_only(key))
self.assertEqual(recIn, recOut, 'records do not match')
# remove it
rc = self.client.remove(digest_only(key))
self.assertEqual(rc, 0, 'wrong return code')
# ensure not existent
try:
(key, meta) = self.client.exists(digest_only(key))
"""
We are making the api backward compatible. In case of
RecordNotFound an exception will not be raised.
            Instead an OK response is returned with the
meta as None. This might change with further releases.
"""
assert meta is None
except e.RecordNotFound as exception:
assert exception.code == 2
# count records
count = 0
self.client.scan("test", "unittest").foreach(count_records)
self.assertEqual(count, 0, 'set should be empty')
def test_3(self):
"""
Using multiple keys
"""
from aerospike import predicates as p
global count
for i in range(2):
key = ('test', 'unittest', i)
rec = {
'name': 'name%s' % (str(i)),
'addr': 'name%s' % (str(i)),
'age': i,
'no': i
}
self.client.put(key, rec)
self.client.index_integer_create('test', 'unittest', 'age',
'age_index', {})
query = self.client.query('test', 'unittest')
query.select("name", "age")
count = 0
query.where(p.between('age', 1, 3))
query.foreach(count_records_false)
self.assertEqual(count, 1, "foreach failed")
for i in range(2):
key = ('test', 'unittest', i)
self.client.remove(key)
self.client.index_remove('test', 'age_index', {})
suite = unittest.TestLoader().loadTestsFromTestCase(KVTestCase)
|
leet/strings/wordBreak.py | peterlamar/python-cp-cheatsheet | 140 | 11138727 | """
time: n^2
space: n
"""
class Solution:
    # Walk the string: from each reachable start index, check whether a slice
    # matches a dictionary word, continuing until the end of s is reached.
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
stk = [0]
visited = set()
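        # stk holds start indices in s that are reachable via a valid segmentation;
        # visited prevents re-expanding the same index twice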
while stk:
i = stk.pop()
visited.add(i)
# check slice at index i
for w in wordDict:
wend = i + len(w)
if s[i:wend] == w:
# return if we reach the end of s
if i + len(w) == len(s):
return True
if wend not in visited:
stk.append(wend)
return False
"""
time: n^3
space: n
"""
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
dp = [True] + [False] * len(s)
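        # dp[i] is True when the prefix s[:i] can be segmented into dictionary
        # words; dp[0] covers the empty prefix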
for i in range(1, len(s)+1):
for w in wordDict:
if s[:i].endswith(w):
dp[i] |= dp[i-len(w)]
return dp[-1] |
cvxpy/reductions/dgp2dcp/atom_canonicalizers/norm_inf_canon.py | QiuWJX/cvxpy | 3,285 | 11138741 | from cvxpy.atoms.max import max
from cvxpy.reductions.eliminate_pwl.atom_canonicalizers.max_canon import (
max_canon,)
def norm_inf_canon(expr, args):
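    # For the positive arguments handled in DGP, the infinity norm reduces to the
    # elementwise maximum, so canonicalization is delegated to max_canon.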
assert len(args) == 1
tmp = max(args[0], expr.axis, expr.keepdims)
return max_canon(tmp, tmp.args)
|
python/src/stdbscan.py | eubr-bigsea/py-st-dbscan | 126 | 11138742 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import timedelta
import pyproj
class STDBSCAN(object):
def __init__(self, spatial_threshold=500.0, temporal_threshold=60.0,
min_neighbors=15):
"""
Python ST-DBSCAN implementation.
Because this algorithm needs to calculate multiple distances between
points, it optimizes by assuming latitude and longitude columns in
        UTM projection. If they are not, convert them with the available
        `coordinates.convert_to_utm` method.
UTM projects onto a cylinder, and a cylinder is essentially flat (zero
Gaussian curvature) so the Euclidean formula would be accurate for
points on the cylinder (same Zone).
:param spatial_threshold: Maximum geographical coordinate (spatial)
distance value (meters);
:param temporal_threshold: Maximum non-spatial distance value (seconds);
:param min_neighbors: Minimum number of points within Eps1 and Eps2
distance;
"""
self.spatial_threshold = spatial_threshold
self.temporal_threshold = temporal_threshold
self.min_neighbors = min_neighbors
def _retrieve_neighbors(self, index_center, matrix):
center_point = matrix[index_center, :]
# filter by time
min_time = center_point[2] - timedelta(seconds=self.temporal_threshold)
max_time = center_point[2] + timedelta(seconds=self.temporal_threshold)
matrix = matrix[(matrix[:, 2] >= min_time) &
(matrix[:, 2] <= max_time), :]
# filter by distance
tmp = (matrix[:, 0]-center_point[0])*(matrix[:, 0]-center_point[0]) + \
(matrix[:, 1]-center_point[1])*(matrix[:, 1]-center_point[1])
        neighborhood = matrix[tmp <= (
            self.spatial_threshold*self.spatial_threshold), 4].tolist()
        neighborhood.remove(index_center)
        return neighborhood
def fit_transform(self, df, col_lat, col_lon, col_time,
col_cluster='cluster'):
"""
:param df: DataFrame input
:param col_lat: Latitude column name;
:param col_lon: Longitude column name;
:param col_time: Date time column name;
:param col_cluster: Alias for predicted cluster (default, 'cluster');
"""
cluster_label = 0
noise = -1
unmarked = 777777
stack = []
# initial setup
df = df[[col_lon, col_lat, col_time]]
df[col_cluster] = unmarked
df['index'] = range(df.shape[0])
matrix = df.values
df.drop(['index'], inplace=True, axis=1)
# for each point in database
for index in range(matrix.shape[0]):
if matrix[index, 3] == unmarked:
neighborhood = self._retrieve_neighbors(index, matrix)
if len(neighborhood) < self.min_neighbors:
matrix[index, 3] = noise
else: # found a core point
cluster_label += 1
# assign a label to core point
matrix[index, 3] = cluster_label
# assign core's label to its neighborhood
for neig_index in neighborhood:
matrix[neig_index, 3] = cluster_label
stack.append(neig_index) # append neighbors to stack
# find new neighbors from core point neighborhood
while len(stack) > 0:
current_point_index = stack.pop()
new_neighborhood = \
self._retrieve_neighbors(current_point_index,
matrix)
# current_point is a new core
if len(new_neighborhood) >= self.min_neighbors:
for neig_index in new_neighborhood:
neig_cluster = matrix[neig_index, 3]
if any([neig_cluster == noise,
neig_cluster == unmarked]):
matrix[neig_index, 3] = cluster_label
stack.append(neig_index)
df[col_cluster] = matrix[:, 3]
return df
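# Illustrative usage sketch (not part of the original file); column names are
# hypothetical and coordinates are assumed to already be in UTM meters:
#
#   st_dbscan = STDBSCAN(spatial_threshold=500.0, temporal_threshold=600.0,
#                        min_neighbors=5)
#   clustered = st_dbscan.fit_transform(df, col_lat='lat', col_lon='lon',
#                                       col_time='timestamp')
#   # the returned frame carries a 'cluster' column; -1 marks noise points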
|
src/ralph/assets/migrations/0025_auto_20170331_1341.py | DoNnMyTh/ralph | 1,668 | 11138755 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('assets', '0024_auto_20170322_1148'),
]
operations = [
migrations.AlterField(
model_name='asset',
name='model',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='assets.AssetModel', related_name='assets'),
),
]
|
synapse/mindmeld.py | ackroute/synapse | 216 | 11138762 | # reserved for future use
|
davarocr/davarocr/davar_det/models/losses/dice_loss.py | icedream2/DAVAR-Lab-OCR | 387 | 11138768 | """
#################################################################################################
# Copyright Info : Copyright (c) <NAME> @ Hikvision Research Institute. All rights reserved.
# Filename : dice_loss.py
# Abstract : Implements of dice loss. Refer to https://github.com/hubutui/DiceLoss-PyTorch
# Current Version: 1.0.0
# Date : 2020-05-31
#################################################################################################
"""
import torch
import torch.nn as nn
from mmdet.models.builder import LOSSES
def binaray_dice_loss(predict, target, smooth=1, p=2, weight=None):
"""Dice loss for binary classification
Args:
predict(Tensor): a tensor of shape [N, H, W]
target(Tensor): a tensor of shape same with predict
smooth(float): a float number to smooth loss, and avoid NaN error, default:1
        p(int): denominator exponent p in sum(x^p) + sum(y^p), default: 2
        weight(Tensor): pixel-wise loss weight of shape [H, W]
Returns:
Tensor: loss tensor
"""
assert predict.shape[0] == target.shape[0]
if weight is not None:
predict = torch.mul(predict, weight)
target = torch.mul(target, weight)
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target))*2 + smooth
den = torch.sum(predict.pow(p)+target.pow(p)) + smooth
loss = 1 - num / den
return loss
@LOSSES.register_module()
class DiceLoss(nn.Module):
"""Dice loss for multi-class classification. [1]
Ref: https://github.com/hubutui/DiceLoss-PyTorch
"""
def __init__(self,
smooth=1,
p=2,
loss_weight=1.0):
""" Initialization.
Args:
smooth(float): a float number to smooth loss, and avoid NaN error, default:1
            p(int): denominator exponent p in sum(x^p) + sum(y^p), default: 2
loss_weight(float): loss weight
"""
super().__init__()
self.smooth = smooth
self.p = p
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
weight_in_channel=None
):
""" Multiply loss with loss_weight.
Args:
predict(Tensor): a tensor of shape [N, C, H, W]
target(Tensor): a tensor of shape same with predict
weight(Tensor): pixel-wised weight tensor, whose shape is [N, H, W]
weight_in_channel(Tensor): channel-wised weight tensor, whose shape is [N, C]
Returns:
Tensor: loss tensor
"""
loss = self.loss_weight * self._multi_cls_loss(pred, target, weight=weight, weight_in_channel=weight_in_channel)
return loss
def _multi_cls_loss(self, predict, target, weight=None, weight_in_channel=None):
"""Dice loss for multi-class classification (as the expected value of multiple dices
losses for binary classificaitions seperately)
Arg:
predict(Tensor): feature map predictions,
[N, num_classes, H, W], where for num_classes classes, each contains a map of shape [H, W]
target(Tensor) : feature map ground-truth labels (one-hot encoding)
[N, num_classes, H, W], where for num_classes classes, each contains a map of shape [H, W]
weight(Tensor) : [N, H, W], mask (or weight) of feature map ground-truth labels,
no loss generates in the pixel if corresponding element of weight is 0 mask (weight)
weight_in_channel(Tensor): [N, num_classes], weight for channels
Returns:
loss tensor
"""
assert predict.shape == target.shape
if weight is not None:
assert predict[0, 0].shape == weight[0].shape
if weight_in_channel is not None:
predict = torch.mul(predict, weight_in_channel)
target = torch.mul(target, weight_in_channel)
total_loss = 0
for i in range(target.shape[1]):
dice_loss = binaray_dice_loss(predict[:, i], target[:, i], self.smooth, self.p, weight=weight)
total_loss += dice_loss
return total_loss / target.shape[1]
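# Illustrative usage sketch (not part of the original file); shapes are assumptions.
# Predictions and one-hot targets share the layout [N, num_classes, H, W], with an
# optional pixel mask of shape [N, H, W]:
#
#   criterion = DiceLoss(smooth=1, p=2, loss_weight=1.0)
#   pred = torch.sigmoid(torch.randn(2, 3, 64, 64))
#   target = torch.randint(0, 2, (2, 3, 64, 64)).float()
#   loss = criterion(pred, target)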
|
examples/core/geom/projective_ops.py | mli0603/lietorch | 360 | 11138776 | import torch
import torch.nn.functional as F
from lietorch import SE3, Sim3
MIN_DEPTH = 0.1
def extract_intrinsics(intrinsics):
return intrinsics[...,None,None,:].unbind(dim=-1)
def iproj(disps, intrinsics):
""" pinhole camera inverse projection """
ht, wd = disps.shape[2:]
fx, fy, cx, cy = extract_intrinsics(intrinsics)
y, x = torch.meshgrid(
torch.arange(ht).to(disps.device).float(),
torch.arange(wd).to(disps.device).float())
i = torch.ones_like(disps)
X = (x - cx) / fx
Y = (y - cy) / fy
return torch.stack([X, Y, i, disps], dim=-1)
def proj(Xs, intrinsics, jacobian=False):
""" pinhole camera projection """
fx, fy, cx, cy = extract_intrinsics(intrinsics)
X, Y, Z, D = Xs.unbind(dim=-1)
d = torch.where(Z.abs() < 0.001, torch.zeros_like(Z), 1.0/Z)
x = fx * (X * d) + cx
y = fy * (Y * d) + cy
coords = torch.stack([x,y, D*d], dim=-1)
if jacobian:
B, N, H, W = d.shape
o = torch.zeros_like(d)
proj_jac = torch.stack([
fx*d, o, -fx*X*d*d, o,
o, fy*d, -fy*Y*d*d, o,
o, o, -D*d*d, d,
], dim=-1).view(B, N, H, W, 3, 4)
return coords, proj_jac
return coords, None
def actp(Gij, X0, jacobian=False):
""" action on point cloud """
X1 = Gij[:,:,None,None] * X0
if jacobian:
X, Y, Z, d = X1.unbind(dim=-1)
o = torch.zeros_like(d)
B, N, H, W = d.shape
if isinstance(Gij, SE3):
Ja = torch.stack([
d, o, o, o, Z, -Y,
o, d, o, -Z, o, X,
o, o, d, Y, -X, o,
o, o, o, o, o, o,
], dim=-1).view(B, N, H, W, 4, 6)
elif isinstance(Gij, Sim3):
Ja = torch.stack([
d, o, o, o, Z, -Y, X,
o, d, o, -Z, o, X, Y,
o, o, d, Y, -X, o, Z,
o, o, o, o, o, o, o
], dim=-1).view(B, N, H, W, 4, 7)
return X1, Ja
return X1, None
def projective_transform(poses, depths, intrinsics, ii, jj, jacobian=False):
""" map points from ii->jj """
# inverse project (pinhole)
X0 = iproj(depths[:,ii], intrinsics[:,ii])
# transform
Gij = poses[:,jj] * poses[:,ii].inv()
X1, Ja = actp(Gij, X0, jacobian=jacobian)
# project (pinhole)
x1, Jp = proj(X1, intrinsics[:,jj], jacobian=jacobian)
# exclude points too close to camera
valid = ((X1[...,2] > MIN_DEPTH) & (X0[...,2] > MIN_DEPTH)).float()
valid = valid.unsqueeze(-1)
if jacobian:
Jj = torch.matmul(Jp, Ja)
Ji = -Gij[:,:,None,None,None].adjT(Jj)
return x1, valid, (Ji, Jj)
return x1, valid
def induced_flow(poses, disps, intrinsics, ii, jj):
""" optical flow induced by camera motion """
ht, wd = disps.shape[2:]
y, x = torch.meshgrid(
torch.arange(ht).to(disps.device).float(),
torch.arange(wd).to(disps.device).float())
coords0 = torch.stack([x, y], dim=-1)
coords1, valid = projective_transform(poses, disps, intrinsics, ii, jj)
return coords1[...,:2] - coords0, valid
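# Illustrative usage sketch (not part of the original file); shapes are assumptions.
# poses is a lietorch SE3 object with one pose per frame, disps is [B, num_frames, H, W],
# intrinsics is [B, num_frames, 4] as (fx, fy, cx, cy), and ii/jj index frame pairs:
#
#   ii = torch.as_tensor([0, 1])
#   jj = torch.as_tensor([1, 2])
#   flow, valid = induced_flow(poses, disps, intrinsics, ii, jj)
#   coords, valid, (Ji, Jj) = projective_transform(poses, disps, intrinsics,
#                                                  ii, jj, jacobian=True)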
|
openstack-seeder/python/setup.py | kayrus/kubernetes-operators | 127 | 11138789 | from setuptools import setup
setup(
name='openstack_seeder',
version='2.0.1',
packages='.',
install_requires=[
'python-keystoneclient>=3.20.0',
'python-novaclient>=14.2.0',
'python-neutronclient>=6.12.0',
'python-designateclient>=2.11.0',
'python-swiftclient>=3.8.0',
'python-manilaclient>=1.27.0',
'python-cinderclient>=6.0.0',
'osc-placement>=1.4.0',
'raven',
'zipp==3.0.0',
'pyyaml>=4.2b4',
'pyparsing==2.1.0',
'oslo.serialization==2.29.2',
'funcsigs',
'oslo.config==7.0.0',
'python-dateutil>=2.7.0',
],
url='https://github.com/sapcc/kubernetes-operators/openstack-seeder',
license='',
author='<NAME>',
author_email='<EMAIL>',
description='Openstack Seeder',
entry_points = {
"console_scripts": [
'openstack-seed-loader = openstack_seeder:main',
]
},
)
|
research/object_detection/models/keras_models/resnet_v1_test.py | 873040/Abhishek | 153 | 11138794 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resnet_v1.py.
This test mainly focuses on comparing slim resnet v1 and Keras resnet v1 for
object detection. To verify the consistency of the two models, we compare:
1. Output shape of each layer given different inputs.
2. Number of global variables.
"""
import numpy as np
from six.moves import zip
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models.keras_models import resnet_v1
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
_EXPECTED_SHAPES_224_RESNET50 = {
'conv2_block3_out': (4, 56, 56, 256),
'conv3_block4_out': (4, 28, 28, 512),
'conv4_block6_out': (4, 14, 14, 1024),
'conv5_block3_out': (4, 7, 7, 2048),
}
_EXPECTED_SHAPES_224_RESNET101 = {
'conv2_block3_out': (4, 56, 56, 256),
'conv3_block4_out': (4, 28, 28, 512),
'conv4_block23_out': (4, 14, 14, 1024),
'conv5_block3_out': (4, 7, 7, 2048),
}
_EXPECTED_SHAPES_224_RESNET152 = {
'conv2_block3_out': (4, 56, 56, 256),
'conv3_block8_out': (4, 28, 28, 512),
'conv4_block36_out': (4, 14, 14, 1024),
'conv5_block3_out': (4, 7, 7, 2048),
}
_RESNET_NAMES = ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']
_RESNET_MODELS = [
resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101, resnet_v1.resnet_v1_152
]
_RESNET_SHAPES = [
_EXPECTED_SHAPES_224_RESNET50, _EXPECTED_SHAPES_224_RESNET101,
_EXPECTED_SHAPES_224_RESNET152
]
_NUM_CHANNELS = 3
_BATCH_SIZE = 4
class ResnetV1Test(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _create_application_with_layer_outputs(self,
model_index,
batchnorm_training,
batchnorm_scale=True,
weight_decay=0.0001,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5):
"""Constructs Keras resnet_v1 that extracts layer outputs."""
# Have to clear the Keras backend to ensure isolation in layer naming
tf.keras.backend.clear_session()
layer_names = _RESNET_SHAPES[model_index].keys()
full_model = _RESNET_MODELS[model_index](
batchnorm_training=batchnorm_training,
weights=None,
batchnorm_scale=batchnorm_scale,
weight_decay=weight_decay,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
include_top=False)
layer_outputs = [
full_model.get_layer(name=layer).output for layer in layer_names
]
return tf.keras.Model(inputs=full_model.inputs, outputs=layer_outputs)
def _check_returns_correct_shape(self,
image_height,
image_width,
model_index,
expected_feature_map_shape,
batchnorm_training=True,
batchnorm_scale=True,
weight_decay=0.0001,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5):
model = self._create_application_with_layer_outputs(
model_index=model_index,
batchnorm_training=batchnorm_training,
batchnorm_scale=batchnorm_scale,
weight_decay=weight_decay,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon)
image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width,
_NUM_CHANNELS).astype(np.float32)
feature_maps = model(image_tensor)
layer_names = _RESNET_SHAPES[model_index].keys()
for feature_map, layer_name in zip(feature_maps, layer_names):
expected_shape = _RESNET_SHAPES[model_index][layer_name]
self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, model_index):
tf.keras.backend.clear_session()
model = self._create_application_with_layer_outputs(
model_index, batchnorm_training=False)
preprocessed_inputs = tf.placeholder(tf.float32,
(4, None, None, _NUM_CHANNELS))
model(preprocessed_inputs)
return model.variables
def test_returns_correct_shapes_224(self):
image_height = 224
image_width = 224
for model_index, _ in enumerate(_RESNET_NAMES):
expected_feature_map_shape = _RESNET_SHAPES[model_index]
self._check_returns_correct_shape(image_height, image_width, model_index,
expected_feature_map_shape)
def test_hyperparam_override(self):
for model_name in _RESNET_MODELS:
model = model_name(
batchnorm_training=True,
default_batchnorm_momentum=0.2,
default_batchnorm_epsilon=0.1,
weights=None,
include_top=False)
bn_layer = model.get_layer(name='conv1_bn')
self.assertAllClose(bn_layer.momentum, 0.2)
self.assertAllClose(bn_layer.epsilon, 0.1)
def test_variable_count(self):
# The number of variables from slim resnetv1-* model.
variable_nums = [265, 520, 775]
for model_index, var_num in enumerate(variable_nums):
variables = self._get_variables(model_index)
self.assertEqual(len(variables), var_num)
if __name__ == '__main__':
tf.test.main()
|
hail/python/test/hail/utils/test_google_fs_utils.py | 3vivekb/hail | 789 | 11138808 | import unittest
import hail as hl
from hail.utils import hadoop_open, hadoop_copy
from hail.fs.hadoop_fs import HadoopFS
from ..helpers import startTestHailContext, stopTestHailContext, resource, _initialized
import os
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
@classmethod
def setUpClass(cls):
bucket = os.environ.get("TEST_BUCKET_NAME", None)
if bucket is None:
raise unittest.case.SkipTest("TEST_BUCKET_NAME not set in env")
if 'HAIL_APISERVER_URL' not in os.environ:
raise unittest.case.SkipTest("HAIL_APISERVER_URL not set in env")
import secrets
with open('randomBytes', 'wb') as f:
f.write(secrets.token_bytes(2048))
if bucket.startswith('gs://'):
cls.remote_bucket = bucket
cls.local_dir = f"/tmp/{bucket[5:]}"
else:
cls.remote_bucket = f"gs://{bucket}"
cls.local_dir = f"/tmp/{bucket}"
@classmethod
def tearDownClass(cls):
import shutil
shutil.rmtree(cls.local_dir)
def test_hadoop_methods(self, bucket=None):
if bucket is None:
bucket = self.remote_bucket
data = ['foo', 'bar', 'baz']
data.extend(map(str, range(100)))
with hadoop_open(f'{bucket}/test_out.txt', 'w') as f:
for d in data:
f.write(d)
f.write('\n')
with hadoop_open(f'{bucket}/test_out.txt') as f:
data2 = [line.strip() for line in f]
self.assertEqual(data, data2)
with hadoop_open(f'{bucket}/test_out.txt.gz', 'w') as f:
for d in data:
f.write(d)
f.write('\n')
with hadoop_open(f'{bucket}/test_out.txt.gz') as f:
data3 = [line.strip() for line in f]
self.assertEqual(data, data3)
hadoop_copy(f'{bucket}/test_out.txt.gz',
f'{bucket}/test_out.copy.txt.gz')
with hadoop_open(f'{bucket}/test_out.copy.txt.gz') as f:
data4 = [line.strip() for line in f]
self.assertEqual(data, data4)
local_fs = HadoopFS()
with local_fs.open(resource('randomBytes'), buffer_size=100) as f:
with hadoop_open(f'{bucket}/randomBytesOut', 'w', buffer_size=2**18) as out:
b = f.read()
out.write(b)
with hadoop_open(f'{bucket}/randomBytesOut', buffer_size=2**18) as f:
b2 = f.read()
self.assertEqual(b, b2)
def test_hadoop_methods_local(self):
self.test_hadoop_methods(self.local_dir)
def test_hadoop_exists(self, bucket=None):
if bucket is None:
bucket = self.remote_bucket
with hadoop_open(f'{bucket}/test_exists.txt', 'w') as f:
f.write("HELLO WORLD")
r_exists = f'{bucket}/test_exists.txt'
r_not_exists = f'{bucket}/not_exists.txt'
self.assertTrue(hl.hadoop_exists(r_exists))
self.assertFalse(hl.hadoop_exists(r_not_exists))
def test_hadoop_exists_local(self):
self.test_hadoop_exists(self.local_dir)
def test_hadoop_is_file(self, bucket=None):
if bucket is None:
bucket = self.remote_bucket
a_file = f'{bucket}/test_hadoop_is_file.txt'
with hadoop_open(a_file, 'w') as f:
f.write("HELLO WORLD")
self.assertTrue(hl.hadoop_is_file(a_file))
self.assertFalse(hl.hadoop_is_file(f'{bucket}/'))
self.assertFalse(hl.hadoop_is_file(f'{bucket}/invalid-path'))
def test_hadoop_is_file_local(self):
self.test_hadoop_is_file(self.local_dir)
def test_hadoop_stat(self, bucket=None):
if bucket is None:
bucket = self.remote_bucket
stat1 = hl.hadoop_stat(f'{bucket}/')
self.assertEqual(stat1['is_dir'], True)
stat2 = hl.hadoop_stat(f'{bucket}/test_out.copy.txt.gz')
self.assertEqual(stat2['size_bytes'], 302)
self.assertEqual(stat2['is_dir'], False)
self.assertTrue('path' in stat2)
self.assertTrue('owner' in stat2)
self.assertTrue('modification_time' in stat2)
def test_hadoop_stat_local(self):
self.test_hadoop_stat(self.local_dir)
|
examples/dialogue/dialogue/Trainer.py | ZfSangkuan/ASER | 256 | 11138809 | import os
import torch
from dialogue.models.constructor import construct_model
from dialogue.toolbox.stats import Statistics
class Trainer(object):
def __init__(self, train_iter, valid_iter,
vocabs, optimizer, train_opt, logger):
self.train_iter = train_iter
self.valid_iter = valid_iter
self.vocabs = vocabs
self.optimizer = optimizer
self.train_opt = train_opt
self.model = construct_model(train_opt, vocabs["pre_word_emb"])
if train_opt.meta.use_cuda:
self.model = self.model.cuda()
self.logger = logger
self.optimizer.set_parameters(self.model.named_parameters())
self.best_score = float('inf')
self.step = 0
def train(self):
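        # One optimizer step per batch: accumulate loss/accuracy statistics,
        # report every `print_every` steps, and validate (with best-model
        # checkpointing) every `valid_every` steps until `total_steps` is reached.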
total_stats = Statistics(self.logger)
report_stats = Statistics(self.logger)
for batch in self.train_iter:
self.model.zero_grad()
result_dict = self.model.run_batch(batch)
loss = result_dict["loss"]
loss.div(self.train_opt.meta.batch_size).backward()
self.optimizer.step()
batch_stats = Statistics(num=result_dict["num_words"],
loss=loss.item(),
n_words=result_dict["num_words"],
n_correct=result_dict["num_correct"],
logger=self.logger)
total_stats.update(batch_stats)
report_stats.update(batch_stats)
if self.step and self.step % self.train_opt.meta.print_every == 0:
report_stats.output(self.step, self.train_opt.meta.total_steps)
if self.step and self.step % self.train_opt.meta.valid_every == 0:
self.hit_checkpoint(total_stats)
if self.step > self.train_opt.meta.total_steps:
break
self.step += 1
def _validate(self):
self.model.eval()
self.model.flatten_parameters()
stats = Statistics(self.logger)
for j, batch in enumerate(self.valid_iter):
result_dict = self.model.run_batch(batch)
batch_stats = Statistics(num=result_dict["num_words"],
loss=result_dict["loss"].item(),
n_words=result_dict["num_words"],
n_correct=result_dict["num_correct"],
logger=self.logger)
stats.update(batch_stats)
# Set model back to training mode.
self.model.train()
return stats
def hit_checkpoint(self, train_stats):
self.logger.info('Train loss: %g' % train_stats.get_loss())
self.logger.info('Train perplexity: %g' % train_stats.ppl())
self.logger.info('Train accuracy: %g' % train_stats.accuracy())
valid_stats = self._validate()
self.logger.info('Valid loss: %g' % valid_stats.get_loss())
self.logger.info('Valid perplexity: %g' % valid_stats.ppl())
self.logger.info('Valid accuracy: %g' % valid_stats.accuracy())
if valid_stats.ppl() < self.best_score:
self.best_score = valid_stats.ppl()
self.save_checkpoint(valid_stats.ppl(), "best_model.pt")
self.logger.info("Save best model..")
self.logger.info("Learning rate: {}".format(self.optimizer.learning_rate))
def save_checkpoint(self, score_dict, ckp_name):
model_file = {
"saved_step": self.step,
"model": self.model,
"score": score_dict,
"train_opt": self.train_opt,
"vocabs": self.vocabs,
}
torch.save(model_file, os.path.join(
self.train_opt.meta.save_model, ckp_name)) |
member.py | YiWeiShen/v2ex | 161 | 11138810 | #!/usr/bin/env python
# coding=utf-8
import os
import base64
import re
import time
import datetime
import hashlib
import httplib
import string
import pickle
from StringIO import StringIO
from django.utils import simplejson as json
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import images
from google.appengine.ext import db
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
from v2ex.babel import Member
from v2ex.babel import Avatar
from v2ex.babel import Counter
from v2ex.babel.security import *
from v2ex.babel.ua import *
from v2ex.babel.da import *
from v2ex.babel.l10n import *
from v2ex.babel.ext.cookies import Cookies
from v2ex.babel.ext.sessions import Session
from v2ex.babel.ext.upyun import UpYun, md5, md5file
from v2ex.babel import SYSTEM_VERSION
from v2ex.babel.handlers import BaseHandler
import config
template.register_template_library('v2ex.templatetags.filters')
class MemberHandler(webapp.RequestHandler):
def get(self, member_username):
site = GetSite()
browser = detect(self.request)
self.session = Session()
template_values = {}
template_values['site'] = site
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
template_values['member'] = member
template_values['show_extra_options'] = False
if member:
if member.num == 1:
template_values['show_extra_options'] = True
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
one = False
one = GetMemberByUsername(member_username)
if one is not False:
if one.followers_count is None:
one.followers_count = 0
template_values['one'] = one
template_values['page_title'] = site.title + u' › ' + one.username
template_values['canonical'] = 'http://' + site.domain + '/member/' + one.username
if one.github:
github = memcache.get('Member::' + one.username_lower + '::github')
if github is None:
response = urlfetch.fetch("https://api.github.com/users/" + one.github + "/repos")
if response.status_code == 200:
github = response.content
memcache.set('Member::' + one.username_lower + '::github', github, 86400)
if github is not None:
template_values['github_repos'] = sorted(json.loads(github), key=lambda x:x['stargazers_count'], reverse=True)
if one is not False:
member_blog = memcache.get('member::' + str(one.num) + '::blog')
if member_blog == None:
blog = db.GqlQuery("SELECT * FROM Topic WHERE node_name = :1 AND member_num = :2 ORDER BY created DESC LIMIT 1", 'blog', one.num)
if blog.count() > 0:
template_values['blog'] = blog[0]
memcache.set('member::' + str(one.num) + '::blog', blog[0], 7200)
else:
template_values['blog'] = member_blog
member_topics = memcache.get('member::' + str(one.num) + '::topics')
if member_topics != None:
template_values['topics'] = member_topics
else:
q2 = db.GqlQuery("SELECT * FROM Topic WHERE member_num = :1 ORDER BY created DESC LIMIT 10", one.num)
template_values['topics'] = q2
memcache.set('member::' + str(one.num) + '::topics', q2, 7200)
replies = memcache.get('member::' + str(one.num) + '::participated')
if replies is None:
q3 = db.GqlQuery("SELECT * FROM Reply WHERE member_num = :1 ORDER BY created DESC LIMIT 10", one.num)
ids = []
replies = []
i = 0
for reply in q3:
if reply.topic.num not in ids:
i = i + 1
if i > 10:
break
replies.append(reply)
ids.append(reply.topic.num)
if len(replies) > 0:
memcache.set('member::' + str(one.num) + '::participated', replies, 7200)
if len(replies) > 0:
template_values['replies'] = replies
template_values['show_block'] = False
template_values['show_follow'] = False
template_values['favorited'] = False
if one and member:
if one.num != member.num:
template_values['show_follow'] = True
template_values['show_block'] = True
try:
blocked = pickle.loads(member.blocked.encode('utf-8'))
except:
blocked = []
if one.num in blocked:
template_values['one_is_blocked'] = True
else:
template_values['one_is_blocked'] = False
if member.hasFavorited(one):
template_values['favorited'] = True
else:
template_values['favorited'] = False
if 'message' in self.session:
template_values['message'] = self.session['message']
del self.session['message']
if one is not False:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'member_home.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'member_home.html')
else:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'member_not_found.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'member_not_found.html')
output = template.render(path, template_values)
self.response.out.write(output)
class MemberApiHandler(webapp.RequestHandler):
def get(self, member_username):
site = GetSite()
one = GetMemberByUsername(member_username)
if one:
if one.avatar_mini_url:
if (one.avatar_mini_url[0:1] == '/'):
one.avatar_mini_url = 'http://' + site.domain + one.avatar_mini_url
one.avatar_normal_url = 'http://' + site.domain + one.avatar_normal_url
one.avatar_large_url = 'http://' + site.domain + one.avatar_large_url
template_values = {}
template_values['site'] = site
template_values['one'] = one
path = os.path.join(os.path.dirname(__file__), 'tpl', 'api', 'member.json')
self.response.headers['Content-type'] = 'application/json;charset=UTF-8'
output = template.render(path, template_values)
self.response.out.write(output)
else:
self.error(404)
class SettingsHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
browser = detect(self.request)
self.session = Session()
template_values = {}
template_values['site'] = site
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
template_values['page_title'] = site.title + u' › ' + l10n.settings.decode('utf-8')
if (member):
template_values['member'] = member
template_values['member_username'] = member.username
template_values['member_email'] = member.email
if (member.website == None):
member.website = ''
template_values['member_website'] = member.website
if (member.twitter == None):
member.twitter = ''
template_values['member_twitter'] = member.twitter
if (member.location == None):
member.location = ''
if member.psn is None:
member.psn = ''
template_values['member_psn'] = member.psn
if (member.my_home == None):
member.my_home = ''
template_values['member_my_home'] = member.my_home
template_values['member_btc'] = member.btc
if member.github:
template_values['member_github'] = member.github
else:
template_values['member_github'] = u''
template_values['member_location'] = member.location
if (member.tagline == None):
member.tagline = ''
template_values['member_tagline'] = member.tagline
if (member.bio == None):
member.bio = ''
template_values['member_bio'] = member.bio
template_values['member_show_home_top'] = member.show_home_top
template_values['member_show_quick_post'] = member.show_quick_post
if member.l10n is None:
member.l10n = 'en'
template_values['member_l10n'] = member.l10n
s = GetLanguageSelect(member.l10n)
template_values['s'] = s
if member.twitter_sync == 1:
template_values['member_twitter_sync'] = 1
if member.use_my_css == 1:
template_values['member_use_my_css'] = 1
if (member.my_css == None):
member.my_css = ''
template_values['member_my_css'] = member.my_css
if 'message' in self.session:
message = self.session['message']
del self.session['message']
else:
message = None
template_values['message'] = message
try:
blocked = pickle.loads(member.blocked.encode('utf-8'))
except:
blocked = []
template_values['member_stats_blocks'] = len(blocked)
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'member_settings.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'member_settings.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
self.redirect('/signin')
def post(self):
self.session = Session()
site = GetSite()
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['system_version'] = SYSTEM_VERSION
errors = 0
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
template_values['page_title'] = site.title + u' › ' + l10n.settings.decode('utf-8')
if (member):
template_values['member'] = member
template_values['member_username'] = member.username
template_values['member_email'] = member.email
template_values['member_website'] = member.website
template_values['member_twitter'] = member.twitter
# Verification: password
password_error = 0
password_update = False
password_error_messages = ['',
'新密码长度不能超过 32 个字符',
'请输入当前密码',
'当前密码不正确'
]
password_new = self.request.get('password_new').strip()
if (len(password_new) > 0):
password_update = True
if (len(password_new) > 32):
password_error = 1
else:
password_current = self.request.get('password_current').strip()
if (len(password_current) == 0):
                        password_error = 2
else:
password_current_sha1 = hashlib.sha1(password_current).hexdigest()
if (password_current_sha1 != member.password):
password_error = 3
template_values['password_error'] = password_error
template_values['password_error_message'] = password_error_messages[password_error]
if ((password_error == 0) and (password_update == True)):
member.password = hashlib.sha1(password_new).hexdigest()
member.auth = hashlib.sha1(str(member.num) + ':' + member.password).hexdigest()
member.put()
self.response.headers['Set-Cookie'] = 'auth=' + member.auth + '; expires=' + (datetime.datetime.now() + datetime.timedelta(days=365)).strftime("%a, %d-%b-%Y %H:%M:%S GMT") + '; path=/'
self.redirect('/settings')
# Verification: email
member_email_error = 0
member_email_error_messages = ['',
u'请输入你的电子邮件地址',
u'电子邮件地址长度不能超过 32 个字符',
u'你输入的电子邮件地址不符合规则',
u'抱歉这个电子邮件地址已经有人注册过了']
member_email = self.request.get('email').strip()
if (len(member_email) == 0):
errors = errors + 1
member_email_error = 1
else:
if (len(member_email) > 32):
errors = errors + 1
member_email_error = 2
else:
p = re.compile(r"(?:^|\s)[-a-z0-9_.+]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", re.IGNORECASE)
if (p.search(member_email)):
q = db.GqlQuery('SELECT * FROM Member WHERE email = :1 AND num != :2', member_email.lower(), member.num)
if (q.count() > 0):
errors = errors + 1
member_email_error = 4
else:
errors = errors + 1
member_email_error = 3
template_values['member_email'] = member_email
template_values['member_email_error'] = member_email_error
template_values['member_email_error_message'] = member_email_error_messages[member_email_error]
# Verification: website
member_website_error = 0
member_website_error_messages = ['',
u'个人网站地址长度不能超过 200 个字符',
u'这个网站地址不符合规则'
]
member_website = self.request.get('website').strip()
if (len(member_website) == 0):
member_website = ''
else:
if (len(member_website) > 200):
errors = errors + 1
member_website_error = 1
else:
p = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
if (p.search(member_website)):
errors = errors
else:
errors = errors + 1
member_website_error = 2
template_values['member_website'] = member_website
template_values['member_website_error'] = member_website_error
template_values['member_website_error_message'] = member_website_error_messages[member_website_error]
# Verification: Twitter
member_twitter_error = 0
member_twitter_error_messages = ['',
u'Twitter 用户名长度不能超过 20 个字符',
u'Twitter 用户名不符合规则'
]
member_twitter = self.request.get('twitter').strip()
if (len(member_twitter) == 0):
member_twitter = ''
else:
if (len(member_twitter) > 20):
errors = errors + 1
member_twitter_error = 1
else:
p = re.compile('[a-zA-Z0-9\_]+')
if (p.search(member_twitter)):
errors = errors
else:
errors = errors + 1
member_twitter_error = 2
template_values['member_twitter'] = member_twitter
template_values['member_twitter_error'] = member_twitter_error
template_values['member_twitter_error_message'] = member_twitter_error_messages[member_twitter_error]
# Verification: psn
member_psn_error = 0
member_psn_error_messages = ['',
u'PSN ID 长度不能超过 20 个字符',
u'PSN ID 不符合规则'
]
member_psn = self.request.get('psn').strip()
if (len(member_psn) == 0):
member_psn = ''
else:
if (len(member_psn) > 20):
errors = errors + 1
member_psn_error = 1
else:
p = re.compile('^[a-zA-Z0-9\-\_]+$')
if (p.search(member_psn)):
errors = errors
else:
errors = errors + 1
member_psn_error = 2
template_values['member_psn'] = member_psn
template_values['member_psn_error'] = member_psn_error
template_values['member_psn_error_message'] = member_psn_error_messages[member_psn_error]
# Verification: my_home
member_my_home_error = 0
member_my_home_error_messages = ['',
u'不是一个合法的自定义首页跳转位置',
u'自定义首页跳转位置长度不能超过 32 个字符',
u'自定义首页跳转位置必须以 / 开头'
]
member_my_home = self.request.get('my_home').strip()
if len(member_my_home) > 0:
if member_my_home == '/' or member_my_home.startswith('/signout'):
member_my_home_error = 1
errors = errors + 1
else:
if len(member_my_home) > 32:
member_my_home_error = 2
errors = errors + 1
else:
if member_my_home.startswith('/') is not True:
member_my_home_error = 3
errors = errors + 1
template_values['member_my_home'] = member_my_home
template_values['member_my_home_error'] = member_my_home_error
template_values['member_my_home_error_message'] = member_my_home_error_messages[member_my_home_error]
# Verification: btc
member_btc_error = 0
member_btc_error_messages = ['',
u'BTC 收款地址长度不能超过 40 个字符',
u'BTC 收款地址不符合规则'
]
member_btc = self.request.get('btc').strip()
if (len(member_btc) == 0):
member_btc = ''
else:
if (len(member_btc) > 40):
errors = errors + 1
member_btc_error = 1
else:
p = re.compile('^[a-zA-Z0-9]+$')
if (p.search(member_btc)):
errors = errors
else:
errors = errors + 1
member_btc_error = 2
template_values['member_btc'] = member_btc
template_values['member_btc_error'] = member_btc_error
template_values['member_btc_error_message'] = member_btc_error_messages[member_btc_error]
# Verification: github
member_github_error = 0
member_github_error_messages = ['',
u'GitHub 用户名长度不能超过 40 个字符',
u'GitHub 用户名不符合规则'
]
member_github = self.request.get('github').strip()
if (len(member_github) == 0):
member_github = ''
else:
if (len(member_github) > 40):
errors = errors + 1
member_github_error = 1
else:
p = re.compile('^[a-zA-Z0-9\_]+$')
if (p.search(member_github)):
errors = errors
else:
errors = errors + 1
member_github_error = 2
template_values['member_github'] = member_github
template_values['member_github_error'] = member_github_error
template_values['member_github_error_message'] = member_github_error_messages[member_github_error]
# Verification: location
member_location_error = 0
member_location_error_messages = ['',
u'所在地长度不能超过 40 个字符'
]
member_location = self.request.get('location').strip()
if (len(member_location) == 0):
member_location = ''
else:
if (len(member_location) > 40):
errors = errors + 1
member_location_error = 1
template_values['member_location'] = member_location
template_values['member_location_error'] = member_location_error
template_values['member_location_error_message'] = member_location_error_messages[member_location_error]
# Verification: tagline
member_tagline_error = 0
member_tagline_error_messages = ['',
u'个人签名长度不能超过 70 个字符'
]
member_tagline = self.request.get('tagline').strip()
if (len(member_tagline) == 0):
member_tagline = ''
else:
if (len(member_tagline) > 70):
errors = errors + 1
member_tagline_error = 1
template_values['member_tagline'] = member_tagline
template_values['member_tagline_error'] = member_tagline_error
template_values['member_tagline_error_message'] = member_tagline_error_messages[member_tagline_error]
# Verification: bio
member_bio_error = 0
member_bio_error_messages = ['',
u'个人简介长度不能超过 2000 个字符'
]
member_bio = self.request.get('bio').strip()
if (len(member_bio) == 0):
member_bio = ''
else:
if (len(member_bio) > 2000):
errors = errors + 1
member_bio_error = 1
template_values['member_bio'] = member_bio
template_values['member_bio_error'] = member_bio_error
template_values['member_bio_error_message'] = member_bio_error_messages[member_bio_error]
# Verification: show_home_top and show_quick_post
try:
member_show_home_top = int(self.request.get('show_home_top'))
except:
member_show_home_top = 1
try:
member_show_quick_post = int(self.request.get('show_quick_post'))
except:
member_show_quick_post = 0
if member_show_home_top not in [0, 1]:
member_show_home_top = 1
if member_show_quick_post not in [0, 1]:
member_show_quick_post = 0
# Verification: l10n
member_l10n = self.request.get('l10n').strip()
supported = GetSupportedLanguages()
if member_l10n == '':
member_l10n = site.l10n
else:
if member_l10n not in supported:
member_l10n = site.l10n
s = GetLanguageSelect(member_l10n)
template_values['s'] = s
template_values['member_l10n'] = member_l10n
# Verification: twitter_sync
if member.twitter_oauth == 1:
member_twitter_sync = self.request.get('twitter_sync')
if member_twitter_sync == 'on':
member_twitter_sync = 1
else:
member_twitter_sync = 0
template_values['member_twitter_sync'] = member_twitter_sync
# Verification: use_my_css
member_use_my_css = self.request.get('use_my_css')
if member_use_my_css == 'on':
member_use_my_css = 1
else:
member_use_my_css = 0
template_values['member_use_my_css'] = member_use_my_css
# Verification: my_css
member_my_css_error = 0
member_my_css_error_messages = ['',
u'CSS Hack cannot be longer than 2000 characters'
]
member_my_css = self.request.get('my_css').strip()
if (len(member_my_css) == 0):
member_my_css = ''
else:
if (len(member_my_css) > 2000):
errors = errors + 1
member_my_css_error = 1
template_values['member_my_css'] = member_my_css
template_values['member_my_css_error'] = member_my_css_error
template_values['member_my_css_error_message'] = member_my_css_error_messages[member_my_css_error]
template_values['errors'] = errors
if (errors == 0):
member.email = member_email.lower()
member.website = member_website
member.twitter = member_twitter
member.psn = member_psn
member.btc = member_btc
member.github = member_github
member.location = member_location
member.tagline = member_tagline
if member.twitter_oauth == 1:
member.twitter_sync = member_twitter_sync
member.use_my_css = member_use_my_css
member.my_css = member_my_css
if member_my_home_error == 0 and len(member_my_home) > 0:
member.my_home = member_my_home
else:
if member_my_home_error == 0:
member.my_home = None
member.bio = member_bio
member.show_home_top = member_show_home_top
member.show_quick_post = member_show_quick_post
member.l10n = member_l10n
member.put()
memcache.delete('Member::' + str(member.username))
memcache.delete('Member::' + str(member.username_lower))
memcache.set('Member_' + str(member.num), member, 86400)
self.session['message'] = '个人设置成功更新'
self.redirect('/settings')
else:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'member_settings.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'member_settings.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
self.redirect('/signin')
class SettingsPasswordHandler(webapp.RequestHandler):
def post(self):
site = GetSite()
browser = detect(self.request)
self.session = Session()
template_values = {}
template_values['site'] = site
template_values['page_title'] = site.title + u' › 密码设置'
template_values['system_version'] = SYSTEM_VERSION
errors = 0
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if (member):
template_values['member'] = member
template_values['member_username'] = member.username
template_values['member_email'] = member.email
# Verification: password
password_error = 0
password_update = False
password_error_messages = ['',
'新密码长度不能超过 32 个字符',
'请输入当前密码',
'当前密码不正确'
]
password_new = self.request.get('password_new').strip()
if (len(password_new) > 0):
password_update = True
if (len(password_new) > 32):
password_error = 1
else:
password_current = self.request.get('password_current').strip()
if (len(password_current) == 0):
password_error = 2
else:
password_current_sha1 = hashlib.sha1(password_current).hexdigest()
if (password_current_sha1 != member.password):
password_error = 3
template_values['password_error'] = password_error
template_values['password_error_message'] = password_error_messages[password_error]
if ((password_error == 0) and (password_update == True)):
old_auth = member.auth
memcache.delete(old_auth)
member.password = hashlib.sha1(password_new).hexdigest()
member.auth = hashlib.sha1(str(member.num) + ':' + member.password).hexdigest()
member.put()
memcache.set(member.auth, member.num, 86400 * 14)
memcache.set('Member_' + str(member.num), member, 86400 * 14)
self.session['message'] = '密码已成功更新,下次请用新密码登录'
self.response.headers['Set-Cookie'] = 'auth=' + member.auth + '; expires=' + (datetime.datetime.now() + datetime.timedelta(days=365)).strftime("%a, %d-%b-%Y %H:%M:%S GMT") + '; path=/'
self.redirect('/settings')
else:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'member_settings_password.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'member_settings_password.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
self.redirect('/signin')
class SettingsAvatarHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
self.session = Session()
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['page_title'] = site.title + u' › 头像'
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if (member):
if 'message' in self.session:
template_values['message'] = self.session['message']
del self.session['message']
template_values['member'] = member
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'member_settings_avatar.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'member_settings_avatar.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
self.redirect('/signin')
def post(self):
site = GetSite()
self.session = Session()
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if (member):
dest = '/settings/avatar'
timestamp = str(int(time.time()))
try:
avatar = self.request.get('avatar')
except:
return self.redirect(dest)
if avatar is None:
return self.redirect(dest)
avatar_len = len(avatar)
if avatar_len == 0:
return self.redirect(dest)
avatar_73 = images.resize(avatar, 73, 73)
avatar_48 = images.resize(avatar, 48, 48)
avatar_24 = images.resize(avatar, 24, 24)
# Large 73x73
q1 = db.GqlQuery("SELECT * FROM Avatar WHERE name = :1", 'avatar_' + str(member.num) + '_large')
if (q1.count() == 1):
avatar_large = q1[0]
avatar_large.content = db.Blob(avatar_73)
avatar_large.put()
else:
qc1 = db.GqlQuery("SELECT * FROM Counter WHERE name = :1", 'avatar.max')
if (qc1.count() == 1):
counter1 = qc1[0]
counter1.value = counter1.value + 1
else:
counter1 = Counter()
counter1.name = 'avatar.max'
counter1.value = 1
counter1.put()
avatar_large = Avatar()
avatar_large.name = 'avatar_' + str(member.num) + '_large'
avatar_large.content = db.Blob(avatar_73)
avatar_large.num = counter1.value
avatar_large.put()
member.avatar_large_url = '/avatar/' + str(member.num) + '/large?r=' + timestamp
member.put()
# Normal 48x48
q2 = db.GqlQuery("SELECT * FROM Avatar WHERE name = :1", 'avatar_' + str(member.num) + '_normal')
if (q2.count() == 1):
avatar_normal = q2[0]
avatar_normal.content = db.Blob(avatar_48)
avatar_normal.put()
else:
qc2 = db.GqlQuery("SELECT * FROM Counter WHERE name = :1", 'avatar.max')
if (qc2.count() == 1):
counter2 = qc2[0]
counter2.value = counter2.value + 1
else:
counter2 = Counter()
counter2.name = 'avatar.max'
counter2.value = 1
counter2.put()
avatar_normal = Avatar()
avatar_normal.name = 'avatar_' + str(member.num) + '_normal'
avatar_normal.content = db.Blob(avatar_48)
avatar_normal.num = counter2.value
avatar_normal.put()
member.avatar_normal_url = '/avatar/' + str(member.num) + '/normal?r=' + timestamp
member.put()
# Mini 24x24
q3 = db.GqlQuery("SELECT * FROM Avatar WHERE name = :1", 'avatar_' + str(member.num) + '_mini')
if (q3.count() == 1):
avatar_mini = q3[0]
avatar_mini.content = db.Blob(avatar_24)
avatar_mini.put()
else:
qc3 = db.GqlQuery("SELECT * FROM Counter WHERE name = :1", 'avatar.max')
if (qc3.count() == 1):
counter3 = qc3[0]
counter3.value = counter3.value + 1
else:
counter3 = Counter()
counter3.name = 'avatar.max'
counter3.value = 1
counter3.put()
avatar_mini = Avatar()
avatar_mini.name = 'avatar_' + str(member.num) + '_mini'
avatar_mini.content = db.Blob(avatar_24)
avatar_mini.num = counter3.value
avatar_mini.put()
member.avatar_mini_url = '/avatar/' + str(member.num) + '/mini?r=' + timestamp
member.put()
# Upload to MobileMe
if config.mobileme_enabled:
headers = {'Authorization' : 'Basic ' + base64.b64encode(config.mobileme_username + ':' + config.mobileme_password)}
host = 'idisk.me.com'
# Sharding
timestamp = str(int(time.time()))
shard = member.num % 31
root = '/' + config.mobileme_username + '/Web/Sites/v2ex/avatars/' + str(shard)
root_mini = root + '/mini'
root_normal = root + '/normal'
root_large = root + '/large'
h = httplib.HTTPConnection(host)
# Mini
h.request('PUT', root_mini + '/' + str(member.num) + '.png', str(avatar_24), headers)
response = h.getresponse()
if response.status == 201 or response.status == 204:
member.avatar_mini_url = 'http://web.me.com/' + config.mobileme_username + '/v2ex/avatars/' + str(shard) + '/mini/' + str(member.num) + '.png?r=' + timestamp
# Normal
h.request('PUT', root_normal + '/' + str(member.num) + '.png', str(avatar_48), headers)
response = h.getresponse()
if response.status == 201 or response.status == 204:
member.avatar_normal_url = 'http://web.me.com/' + config.mobileme_username + '/v2ex/avatars/' + str(shard) + '/normal/' + str(member.num) + '.png?r=' + timestamp
# Large
h.request('PUT', root_large + '/' + str(member.num) + '.png', str(avatar_73), headers)
response = h.getresponse()
if response.status == 201 or response.status == 204:
member.avatar_large_url = 'http://web.me.com/' + config.mobileme_username + '/v2ex/avatars/' + str(shard) + '/large/' + str(member.num) + '.png?r=' + timestamp
member.put()
# Upload to UpYun
if config.upyun_enabled:
u = UpYun(config.upyun_bucket, config.upyun_username, config.upyun_password)
# Mini
mini = avatar_24
u.setContentMD5(md5(mini))
mini_suffix = '/avatars/mini/' + str(member.num) + '.png'
r = u.writeFile(mini_suffix, mini, True)
if r == True:
member.avatar_mini_url = 'http://' + config.upyun_bucket + '.b0.upaiyun.com' + mini_suffix + '?r=' + timestamp
# Normal
normal = avatar_48
u.setContentMD5(md5(normal))
normal_suffix = '/avatars/normal/' + str(member.num) + '.png'
r = u.writeFile(normal_suffix, normal, True)
if r == True:
member.avatar_normal_url = 'http://' + config.upyun_bucket + '.b0.upaiyun.com' + normal_suffix + '?r=' + timestamp
# Large
large = avatar_73
u.setContentMD5(md5(large))
large_suffix = '/avatars/large/' + str(member.num) + '.png'
r = u.writeFile(large_suffix, large, True)
if r == True:
member.avatar_large_url = 'http://' + config.upyun_bucket + '.b0.upaiyun.com' + large_suffix + '?r=' + timestamp
member.put()
memcache.set('Member_' + str(member.num), member, 86400 * 14)
memcache.set('Member::' + member.username_lower, member, 86400 * 14)
memcache.delete('Avatar::avatar_' + str(member.num) + '_large')
memcache.delete('Avatar::avatar_' + str(member.num) + '_normal')
memcache.delete('Avatar::avatar_' + str(member.num) + '_mini')
self.session['message'] = '新头像设置成功'
self.redirect('/settings/avatar')
else:
self.redirect('/signin')
class AvatarStringIO(StringIO):
def __len__(self):
content = self.read()
return len(content)
class MemberBlockHandler(webapp.RequestHandler):
def get(self, key):
go = '/'
member = CheckAuth(self)
if member:
member = db.get(member.key())
one = db.get(db.Key(key))
if one:
if one.num != member.num:
try:
blocked = pickle.loads(member.blocked.encode('utf-8'))
except:
blocked = []
if len(blocked) == 0:
blocked = []
if one.num not in blocked:
blocked.append(one.num)
member.blocked = pickle.dumps(blocked)
member.put()
memcache.set('Member_' + str(member.num), member, 86400)
self.redirect(go)
class MemberUnblockHandler(webapp.RequestHandler):
def get(self, key):
go = '/'
member = CheckAuth(self)
if member:
member = db.get(member.key())
one = db.get(db.Key(key))
if one:
if one.num != member.num:
try:
blocked = pickle.loads(member.blocked.encode('utf-8'))
except:
blocked = []
if len(blocked) == 0:
blocked = []
if one.num in blocked:
blocked.remove(one.num)
member.blocked = pickle.dumps(blocked)
member.put()
memcache.set('Member_' + str(member.num), member, 86400)
self.redirect(go)
def main():
application = webapp.WSGIApplication([
('/member/([a-z0-9A-Z\_\-]+)', MemberHandler),
('/member/([a-z0-9A-Z\_\-]+).json', MemberApiHandler),
('/settings', SettingsHandler),
('/settings/password', SettingsPasswordHandler),
('/settings/avatar', SettingsAvatarHandler),
('/block/(.*)', MemberBlockHandler),
('/unblock/(.*)', MemberUnblockHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
homeassistant/components/home_plus_control/__init__.py | MrDelik/core | 30,023 | 11138819 | """The Legrand Home+ Control integration."""
import asyncio
from datetime import timedelta
import logging
import async_timeout
from homepluscontrol.homeplusapi import HomePlusControlApiError
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import (
config_entry_oauth2_flow,
config_validation as cv,
dispatcher,
)
from homeassistant.helpers.device_registry import async_get as async_get_device_registry
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from . import config_flow, helpers
from .api import HomePlusControlAsyncApi
from .const import (
API,
CONF_SUBSCRIPTION_KEY,
DATA_COORDINATOR,
DISPATCHER_REMOVERS,
DOMAIN,
ENTITY_UIDS,
SIGNAL_ADD_ENTITIES,
)
# Configuration schema for component in configuration.yaml
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Required(CONF_SUBSCRIPTION_KEY): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
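# Illustrative configuration.yaml entry matching the schema above; the values are
# placeholders and the last option name assumes that CONF_SUBSCRIPTION_KEY maps to
# "subscription_key" in this integration's constants:
#
#   home_plus_control:
#     client_id: YOUR_CLIENT_ID
#     client_secret: YOUR_CLIENT_SECRET
#     subscription_key: YOUR_SUBSCRIPTION_KEY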
# The Legrand Home+ Control platform is currently limited to "switch" entities
PLATFORMS = [Platform.SWITCH]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Legrand Home+ Control component from configuration.yaml."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
# Register the implementation from the config information
config_flow.HomePlusControlFlowHandler.async_register_implementation(
hass,
helpers.HomePlusControlOAuth2Implementation(hass, config[DOMAIN]),
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Legrand Home+ Control from a config entry."""
hass_entry_data = hass.data[DOMAIN].setdefault(entry.entry_id, {})
# Retrieve the registered implementation
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
# Using an aiohttp-based API lib, so rely on async framework
# Add the API object to the domain's data in HA
api = hass_entry_data[API] = HomePlusControlAsyncApi(hass, entry, implementation)
# Set of entity unique identifiers of this integration
uids = hass_entry_data[ENTITY_UIDS] = set()
# Integration dispatchers
hass_entry_data[DISPATCHER_REMOVERS] = []
device_registry = async_get_device_registry(hass)
# Register the Data Coordinator with the integration
async def async_update_data():
"""Fetch data from API endpoint.
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
module_data = await api.async_get_modules()
except HomePlusControlApiError as err:
raise UpdateFailed(
f"Error communicating with API: {err} [{type(err)}]"
) from err
# Remove obsolete entities from Home Assistant
entity_uids_to_remove = uids - set(module_data)
for uid in entity_uids_to_remove:
uids.remove(uid)
device = device_registry.async_get_device({(DOMAIN, uid)})
device_registry.async_remove_device(device.id)
# Send out signal for new entity addition to Home Assistant
new_entity_uids = set(module_data) - uids
if new_entity_uids:
uids.update(new_entity_uids)
dispatcher.async_dispatcher_send(
hass,
SIGNAL_ADD_ENTITIES,
new_entity_uids,
coordinator,
)
return module_data
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="home_plus_control_module",
update_method=async_update_data,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(seconds=300),
)
hass_entry_data[DATA_COORDINATOR] = coordinator
async def start_platforms():
"""Continue setting up the platforms."""
await asyncio.gather(
*(
hass.config_entries.async_forward_entry_setup(entry, platform)
for platform in PLATFORMS
)
)
# Only refresh the coordinator after all platforms are loaded.
await coordinator.async_refresh()
hass.async_create_task(start_platforms())
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload the Legrand Home+ Control config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok:
# Unsubscribe the config_entry signal dispatcher connections
dispatcher_removers = hass.data[DOMAIN][config_entry.entry_id].pop(
"dispatcher_removers"
)
for remover in dispatcher_removers:
remover()
# And finally unload the domain config entry data
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
|
boost/tools/build/v2/test/boostbook.py | randolphwong/mcsema | 11,356 | 11138824 | #!/usr/bin/python
# Copyright 2004, 2006 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
import string
t = BoostBuild.Tester()
t.set_tree("boostbook")
# For some reason, the messages are sent to stderr.
t.run_build_system()
t.fail_test(string.find(t.stdout(), """Writing boost/A.html for refentry(boost.A)
Writing library/reference.html for section(library.reference)
Writing index.html for chapter(library)
Writing docs_HTML.manifest
""") == -1)
t.expect_addition(["html/boost/A.html", "html/index.html"])
t.cleanup()
|
eeauditor/auditors/aws/Amazon_CloudFront_Auditor.py | kbhagi/ElectricEye | 442 | 11138828 | #This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import datetime
from dateutil import parser
import uuid
import boto3
from check_register import CheckRegister, accumulate_paged_results
registry = CheckRegister()
cloudfront = boto3.client("cloudfront")
paginator = cloudfront.get_paginator("list_distributions")
response_iterator = paginator.paginate()
results = {"DistributionList": {"Items": []}}
for page in response_iterator:
page_vals = page["DistributionList"].get("Items", [])
results["DistributionList"]["Items"].extend(iter(page_vals))
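# The accumulated structure mirrors a single ListDistributions response, roughly
# (illustrative): {"DistributionList": {"Items": [{"Id": "EDFDVBD6EXAMPLE", ...}, ...]}}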
@registry.register_check("cloudfront")
def cloudfront_active_trusted_signers_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
"""[CloudFront.1] Trusted signers should have key pairs"""
iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
for distributionItem in results["DistributionList"]["Items"]:
distributionId = distributionItem["Id"]
distribution = cloudfront.get_distribution(Id=distributionId)
try:
activeTrustedSigners = distribution["Distribution"]["ActiveTrustedSigners"]["Enabled"]
distributionArn = distribution["Distribution"]["ARN"]
generatorUuid = str(uuid.uuid4())
if not activeTrustedSigners:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-active-trusted-signers-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Trusted signers should have key pairs",
"Description": "Distribution "
+ distributionId
+ " has trusted signers without key pairs.",
"Remediation": {
"Recommendation": {
"Text": "For more information on key pairs for CloudFront trusted signers refer to the Creating CloudFront Key Pairs for Your Trusted Signers section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-active-trusted-signers-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[CloudFront.1] Trusted signers should have key pairs",
"Description": "Distribution "
+ distributionId
+ " has trusted signers with key pairs.",
"Remediation": {
"Recommendation": {
"Text": "For more information on key pairs for CloudFront trusted signers refer to the Creating CloudFront Key Pairs for Your Trusted Signers section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": f"{awsPartition.upper()}::::Account:{awsAccountId}",
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("cloudfront")
def cloudfront_origin_shield_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
for distributionItem in results["DistributionList"]["Items"]:
distributionId = distributionItem["Id"]
distribution = cloudfront.get_distribution(Id=distributionId)
try:
            # Origin Shield is configured per origin, so require it to be enabled on every origin
            originShield = all(
                origin.get("OriginShield", {}).get("Enabled", False)
                for origin in distribution["Distribution"]["DistributionConfig"]["Origins"]["Items"]
            )
distributionArn = distribution["Distribution"]["ARN"]
generatorUuid = str(uuid.uuid4())
if not originShield:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-originshield-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have Origin Shield enabled",
"Description": "Distribution "
+ distributionId
+ " does not have Origin Shield enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Origin Shield for CloudFront, refer to the Using Amazon CloudFront Origin Shield section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/origin-shield.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.BE-5",
"NIST CSF PR.PT-5",
"NIST SP 800-53 CP-2",
"NIST SP 800-53 CP-11",
"NIST SP 800-53 SA-13",
"NIST SP 800-53 SA14",
"AICPA TSC CC3.1",
"AICPA TSC A1.2",
"ISO 27001:2013 A.11.1.4",
"ISO 27001:2013 A.17.1.1",
"ISO 27001:2013 A.17.1.2",
"ISO 27001:2013 A.17.2.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-origin-shield-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have Origin Shield enabled",
"Description": "Distribution "
+ distributionId
+ " has Origin Shield enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Origin Shield for CloudFront, refer to the Using Amazon CloudFront Origin Shield section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/origin-shield.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": f"{awsPartition.upper()}::::Account:{awsAccountId}",
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.BE-5",
"NIST CSF PR.PT-5",
"NIST SP 800-53 CP-2",
"NIST SP 800-53 CP-11",
"NIST SP 800-53 SA-13",
"NIST SP 800-53 SA14",
"AICPA TSC CC3.1",
"AICPA TSC A1.2",
"ISO 27001:2013 A.11.1.4",
"ISO 27001:2013 A.17.1.1",
"ISO 27001:2013 A.17.1.2",
"ISO 27001:2013 A.17.2.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("cloudfront")
def cloudfront_default_viewer_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
for distributionItem in results["DistributionList"]["Items"]:
distributionId = distributionItem["Id"]
distribution = cloudfront.get_distribution(Id=distributionId)
try:
            defaultViewer = distribution["Distribution"]["DistributionConfig"]["ViewerCertificate"].get("CloudFrontDefaultCertificate", False)
distributionArn = distribution["Distribution"]["ARN"]
generatorUuid = str(uuid.uuid4())
if not defaultViewer:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-defaultviewer-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have a Default Viewer certificate in place",
"Description": "Distribution "
+ distributionId
+ " does not have Default Viewer certificate in place.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Default Viewer certificates for CloudFront, refer to the Requiring HTTPS for Communication Between Viewers and CloudFront section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-viewers-to-cloudfront.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-defaultviewer-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have a Default Viewer certificate in place",
"Description": "Distribution "
+ distributionId
+ " has Default Viewer certificate in place.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Default Viewer certificates for CloudFront, refer to the Requiring HTTPS for Communication Between Viewers and CloudFront section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-viewers-to-cloudfront.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": f"{awsPartition.upper()}::::Account:{awsAccountId}",
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("cloudfront")
def cloudfront_georestriction_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
for distributionItem in results["DistributionList"]["Items"]:
distributionId = distributionItem["Id"]
distribution = cloudfront.get_distribution(Id=distributionId)
try:
            # A restriction type of "none" means that no geo restriction is configured
            geoRestriction = distribution["Distribution"]["DistributionConfig"]["Restrictions"]["GeoRestriction"]["RestrictionType"] != "none"
distributionArn = distribution["Distribution"]["ARN"]
generatorUuid = str(uuid.uuid4())
if not geoRestriction:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-geo-restriction-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have Geo Ristriction in place",
"Description": "Distribution "
+ distributionId
+ " does not have Geo Restriction in place.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Geo Restriction for CloudFront, refer to the Restricting the Geographic Distribution of Your Content section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/georestrictions.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-geo-restriction-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have Geo Ristriction in place",
"Description": "Distribution "
+ distributionId
+ " has Geo Restriction in place.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Geo Restriction for CloudFront, refer to the Restricting the Geographic Distribution of Your Content section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/georestrictions.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": f"{awsPartition.upper()}::::Account:{awsAccountId}",
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("cloudfront")
def cloudfront_field_level_encryption_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
for distributionItem in results["DistributionList"]["Items"]:
distributionId = distributionItem["Id"]
distribution = cloudfront.get_distribution(Id=distributionId)
try:
            fieldLevelEncryption = distribution["Distribution"]["DistributionConfig"]["DefaultCacheBehavior"]["FieldLevelEncryptionId"]
distributionArn = distribution["Distribution"]["ARN"]
generatorUuid = str(uuid.uuid4())
if not fieldLevelEncryption:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-field-level-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have Field-Level Encryption in place",
"Description": "Distribution "
+ distributionId
+ " does not have Field Level Encryption in place.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Field-Level Encryption for CloudFront, refer to the Using Field-Level Encryption to Help Protect Sensitive Data section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/field-level-encryption.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST SP 800-53 MP-8",
"NIST SP 800-53 SC-12",
"NIST SP 800-53 SC-28",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-field-level-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have Field-Level Encryption in place",
"Description": "Distribution "
+ distributionId
+ " does have Field-Level Encryption in place.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Field Level Encryption for CloudFront, refer to the Using Field-Level Encryption to Help Protect Sensitive Data section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/field-level-encryption.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST SP 800-53 MP-8",
"NIST SP 800-53 SC-12",
"NIST SP 800-53 SC-28",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("cloudfront")
def cloudfront_waf_enabled_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
for distributionItem in results["DistributionList"]["Items"]:
distributionId = distributionItem["Id"]
distribution = cloudfront.get_distribution(Id=distributionId)
try:
            wafEnabled = distribution["Distribution"]["DistributionConfig"]["WebACLId"]
distributionArn = distribution["Distribution"]["ARN"]
generatorUuid = str(uuid.uuid4())
if not wafEnabled:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-waf-enabled-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have WAF enabled",
"Description": "Distribution "
+ distributionId
+ " does not have WAF enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on WAF for CloudFront, refer to the Using AWS WAF to Control Access to Your Content section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-awswaf.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF DE.AE-2",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.1",
"ISO 27001:2013 A.16.1.4",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-waf-enabled-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have WAF enabled",
"Description": "Distribution "
+ distributionId
+ " does has WAF enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on WAF for CloudFront, refer to the Using AWS WAF to Control Access to Your Content section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-awswaf.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF DE.AE-2",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.1",
"ISO 27001:2013 A.16.1.4",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("cloudfront")
def cloudfront_default_tls_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
for distributionItem in results["DistributionList"]["Items"]:
distributionId = distributionItem["Id"]
distribution = cloudfront.get_distribution(Id=distributionId)
try:
            # The minimum TLS protocol version is part of the viewer certificate configuration
            defaultTls = distribution["Distribution"]["DistributionConfig"]["ViewerCertificate"].get("MinimumProtocolVersion", "").startswith("TLSv1")
distributionArn = distribution["Distribution"]["ARN"]
generatorUuid = str(uuid.uuid4())
if not defaultTls:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-default-tls-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have Default TLS enabled",
"Description": "Distribution "
+ distributionId
+ " does not have Default TLS enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Default TLS settings for CloudFront, refer to the Creating, Updating, and Deleting Distributions section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValues-security-policy",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-default-tls-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions should have Default TLS enabled",
"Description": "Distribution "
+ distributionId
+ " does have Default TLS enabled.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Default TLS settings for CloudFront, refer to the Creating, Updating, and Deleting Distributions section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValues-security-policy",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("cloudfront")
def cloudfront_custom_origin_tls_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
for distributionItem in results["DistributionList"]["Items"]:
distributionId = distributionItem["Id"]
distribution = cloudfront.get_distribution(Id=distributionId)
try:
            # TLSv1.2 must be among the SSL protocols of every custom (non-S3) origin
            customOriginTls = all(
                "TLSv1.2" in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"]
                for origin in distribution["Distribution"]["DistributionConfig"]["Origins"]["Items"]
                if "CustomOriginConfig" in origin
            )
distributionArn = distribution["Distribution"]["ARN"]
generatorUuid = str(uuid.uuid4())
if not customOriginTls:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-custom-origin-tls-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions using Custom Origins should be using TLSv1.2",
"Description": "Distribution "
+ distributionId
+ " has Custom Origins not using TLSv1.2.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Custom Origin TLS settings for CloudFront, refer to the Values That You Specify When You Create or Update a Distribution section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cloudfront-custom-origin-tls-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": generatorUuid,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[CloudFront.1] Distributions using Custom Origins should be using TLSv1.2",
"Description": "Distribution "
+ distributionId
+ " has Custom Origins using TLSv1.2.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Custom Origin TLS settings for CloudFront, refer to the Values That You Specify When You Create or Update a Distribution section of the Amazon CloudFront Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsCloudFrontDistribution",
"Id": distributionArn,
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-2",
"NIST SP 800-53 SC-8",
"NIST SP 800-53 SC-11",
"NIST SP 800-53 SC-12",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.13.2.3",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
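# Illustrative sketch (not part of the auditor): each registered check above is a
# generator of Security Hub findings, and the surrounding ElectricEye controller is
# assumed to consume them roughly along these lines.
#
#   for finding in cloudfront_waf_enabled_check({}, "111122223333", "us-east-1", "aws"):
#       print(finding["Id"], finding["Compliance"]["Status"])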
|
web/tests/functional/component/__init__.py | ryankurte/codechecker | 1,601 | 11138873 | # coding=utf-8
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""Setup for the test package component."""
import os
import shutil
import sys
import uuid
from libtest import codechecker
from libtest import env
from libtest import project
# Test workspace should be initialized in this module.
TEST_WORKSPACE = None
def setup_package():
"""Setup the environment for testing components."""
global TEST_WORKSPACE
TEST_WORKSPACE = env.get_workspace('component')
os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE
test_project = 'cpp'
test_config = {}
project_info = project.get_info(test_project)
test_project_path = os.path.join(TEST_WORKSPACE, "test_proj")
shutil.copytree(project.path(test_project), test_project_path)
project_info['project_path'] = test_project_path
test_project_name = project_info['name'] + '_' + uuid.uuid4().hex
test_config['test_project'] = project_info
suppress_file = None
skip_list_file = None
test_env = env.test_env(TEST_WORKSPACE)
codechecker_cfg = {
'suppress_file': suppress_file,
'skip_list_file': skip_list_file,
'check_env': test_env,
'workspace': TEST_WORKSPACE,
'checkers': []
}
# Start or connect to the running CodeChecker server and get connection
# details.
print("This test uses a CodeChecker server... connecting...")
server_access = codechecker.start_or_get_server(auth_required=True)
server_access['viewer_product'] = 'component'
codechecker.add_test_package_product(server_access, TEST_WORKSPACE)
# Extend the checker configuration with the server access.
codechecker_cfg.update(server_access)
# Clean the test project, if needed by the tests.
ret = project.clean(test_project_path)
if ret:
sys.exit(ret)
ret = codechecker.check_and_store(codechecker_cfg,
test_project_name,
test_project_path)
if ret:
sys.exit(1)
print("Analyzing test project was succcessful.")
# Save the run names in the configuration.
codechecker_cfg['run_names'] = [test_project_name]
test_config['codechecker_cfg'] = codechecker_cfg
# Export the test configuration to the workspace.
env.export_test_cfg(TEST_WORKSPACE, test_config)
def teardown_package():
"""Clean up after the test."""
# TODO: If environment variable is set keep the workspace
# and print out the path.
global TEST_WORKSPACE
check_env = env.import_test_cfg(TEST_WORKSPACE)[
'codechecker_cfg']['check_env']
codechecker.remove_test_package_product(TEST_WORKSPACE, check_env)
print("Removing: " + TEST_WORKSPACE)
shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
|
examples/convert/agilent2pipe_3d/agilent2pipe_3d.py | genematx/nmrglue | 150 | 11138876 | #! /usr/bin/env python
import nmrglue as ng
# read in the Agilent data (any of the follow lines will work)
#dic, data=ng.varian.read("agilent_3d")
dic, data=ng.varian.read_lowmem("agilent_3d")
# Set the spectral parameters
udic = ng.varian.guess_udic(dic, data)
# Direct Dimension
udic[2]['size'] = 1250
udic[2]['complex'] = True
udic[2]['encoding'] = 'direct'
udic[2]['sw'] = 50000.0
udic[2]['obs'] = 125.676
udic[2]['car'] = 56.0 * 125.676
udic[2]['label'] = 'CX'
# First indirect dimension
udic[1]['size'] = 88
udic[1]['complex'] = True
udic[1]['encoding'] = 'states'
udic[1]['sw'] = 2777.778
udic[1]['obs'] = 50.648
udic[1]['car'] = 120.0 * 50.648
udic[1]['label'] = 'N'
# Second indirect dimension
udic[0]['size'] = 128
udic[0]['complex'] = True
udic[0]['encoding'] = 'states'
udic[0]['sw'] = 5555.556
udic[0]['obs'] = 125.676
udic[0]['car'] = 56.0 * 125.676
udic[0]['label'] = 'CA'
# create the converter object and initialize with Agilent data
C = ng.convert.converter()
C.from_varian(dic, data, udic)
# create NMRPipe data and then write it out
ng.pipe.write("./data/3d_pipe%03d.fid", *C.to_pipe(), overwrite=True)
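# Optional verification sketch (assumes nmrglue's low-memory pipe reader accepts the
# same %03d multi-file pattern used above):
#
#   rdic, rdata = ng.pipe.read_lowmem("./data/3d_pipe%03d.fid")
#   print(rdata.shape)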
|
src/appier/request.py | BeeMargarida/appier | 127 | 11138890 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2021 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2021 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import json
import time
import base64
import datetime
from . import util
from . import config
from . import legacy
from . import session
from . import exceptions
CODE_STRINGS = {
100 : "Continue",
101 : "Switching Protocols",
200 : "OK",
201 : "Created",
202 : "Accepted",
203 : "Non-Authoritative Information",
204 : "No Content",
205 : "Reset Content",
206 : "Partial Content",
207 : "Multi-Status",
301 : "Moved permanently",
302 : "Found",
303 : "See Other",
304 : "Not Modified",
305 : "Use Proxy",
306 : "(Unused)",
307 : "Temporary Redirect",
400 : "Bad Request",
401 : "Unauthorized",
402 : "Payment Required",
403 : "Forbidden",
404 : "Not Found",
405 : "Method Not Allowed",
406 : "Not Acceptable",
407 : "Proxy Authentication Required",
408 : "Request Timeout",
409 : "Conflict",
410 : "Gone",
411 : "Length Required",
412 : "Precondition Failed",
413 : "Request Entity Too Large",
414 : "Request-URI Too Long",
415 : "Unsupported Media Type",
416 : "Requested Range Not Satisfiable",
417 : "Expectation Failed",
500 : "Internal Server Error",
501 : "Not Implemented",
502 : "Bad Gateway",
503 : "Service Unavailable",
504 : "Gateway Timeout",
505 : "HTTP Version Not Supported"
}
""" Dictionary associating the error code as integers
with the official descriptive message for it """
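# Example (illustrative): resolving the descriptive message for a status code,
# with a generic fallback for codes that are not mapped.
#
#   message = CODE_STRINGS.get(404, "Unknown")  # -> "Not Found"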
class Request(object):
"""
Request class that acts as a proxy for the both
the input and output of an HTTP request.
Other responsibilities should include indirect
session management and serialization.
"""
ALIAS = ("session_id", "_sid")
""" The list of strings that are considered to represent
and alias to the session identifier name, this values may
be changed in case the app required loading of the session
using a different attribute name """
def __init__(
self,
owner = None,
method = "GET",
path = "/",
prefix = "/",
query = "",
scheme = None,
address = None,
protocol = None,
params = {},
data_j = {},
environ = {},
session_c = session.FileSession
):
self.owner = owner
self.method = method
self.path = path
self.prefix = prefix
self.query = query
self.scheme = scheme
self.address = address
self.protocol = protocol
self.params = params
self.data_j = data_j
self.environ = environ
self.session_c = session_c
self.handled = False
self.stime = None
self.etime = None
self.context = None
self.method_i = None
self.exception = None
self.stacktrace = None
self.json = False
self.code = 200
self.location = prefix + util.quote(path).lstrip("/")
self.encoding = "utf-8"
self.content_type = None
self.cache_control = None
self.authorization = None
self.data = None
self.result = None
self.result_l = None
self.locale = None
self.language = None
self.query_s = None
self.prefixes = None
self.session = session.MockSession(self)
self.set_cookie = None
self.set_cookie_prefix = None
self.set_cookie_suffix = None
self.post = {}
self.files = {}
self.args = {}
self.cookies = {}
self.in_headers = {}
self.out_headers = {}
self.warnings = []
self.properties = {}
self._closed = False
self._params = None
def __str__(self):
return str(self.repr())
def __unicode__(self):
return self.__str__()
def close(self):
"""
Called upon the closing of the request so that the internal references
        the request contains may be released (avoiding memory leaks).
After a request is closed it cannot be re-opened and a new request must
be created instead if a new one is required.
"""
self.owner = None
self.method = None
self.path = None
self.prefix = None
self.query = None
self.scheme = None
self.address = None
self.protocol = None
self.params = None
self.data_j = None
self.environ = None
self.session_c = None
self.handled = None
self.context = None
self.method_i = None
self.json = None
self.code = None
self.location = None
self.encoding = None
self.content_type = None
self.cache_control = None
self.authorization = None
self.data = None
self.result = None
self.result_l = None
self.locale = None
self.language = None
self.query_s = None
self.prefixes = None
self.session = None
self.set_cookie = None
self.set_cookie_prefix = None
self.set_cookie_suffix = None
self.post = None
self.files = None
self.args = None
self.cookies = None
self.in_headers = None
self.out_headers = None
self.warnings = None
self.properties = None
self._closed = True
self._params = None
def handle(self, code = None, result = ""):
"""
Handles the current request, setting the response code
and the resulting data that is going to be returned to
the current client.
This method should not be called directly by any of the
action functions but only by the internal appier structures
or any other extra middleware handler. Because of that
this method should be considered internal and must be
used with extreme care.
If this method is called the request is considered to be
handled and the "typical" action function based handling
is going to be skipped.
:type code: int
:param code: The status code that is going to be set for
the response associated with the current request.
:type result: Object
:param result: Value that is returned to the handling
infra-structure as the result of the handling, proper data
        types may vary and these include: strings, dictionaries or
generator objects.
"""
self.handled = True
self.code = self.code if self.code else 200
self.code = code if code else self.code
self.result = result
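    # Illustrative usage sketch (assumed middleware context, not part of the class):
    #
    #   request.handle(code = 404, result = "not found")
    #   # the action function based handling is now skipped and the 404 payload
    #   # is returned to the client by the surrounding infra-structure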
def flush(self):
"""
Flushes the current request information meaning that the
        current request information is updated so that its state
is persisted into the current client's information.
This method should always be called at the end of the request
handling workflow.
"""
self.session.flush(self)
def repr(self):
"""
Returns the dictionary based representation of the current
        request; this information may be used for debugging or
logging purposes.
For performance reasons this method should be used the least
amount of times possible.
:rtype: Dictionary
:return: The dictionary containing the information about
the current request.
"""
return dict(
method = self.method,
path = self.path,
prefix = self.prefix,
query = self.query,
scheme = self.scheme,
encoding = self.encoding,
content_type = self.content_type,
            cache_control = self.cache_control
)
def warning(self, message):
message_t = type(message)
if message_t in legacy.STRINGS:
message = dict(message = message)
elif not message_t == dict:
raise exceptions.OperationalError(
message = "Invalid message type '%s'" % message_t
)
self.warnings.append(message)
def get_params(self):
if self._params: return self._params
self._params = {}
for key, value in self.params.items(): self._params[key] = value[0]
return self._params
def get_param(self, name, default = None):
if not name in self.params: return default
value = self.params[name]
return value[0] if value else default
def set_params(self, params):
self.params = params
self.extend_args(params)
def set_post(self, post):
self.post = post
self.extend_args(post)
def set_files(self, files):
self.files = files
self.extend_args(files)
def set_multipart(self, post, files, ordered):
self.post = post
self.files = files
self.extend_args(ordered)
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
def get_json(self):
return self.data_j
def set_json(self, data_j):
self.data_j = data_j
def set_code(self, code):
self.code = code
def get_encoded(self, encoding = None, safe = True):
if encoding == None: encoding = self.get_encoding()
if not encoding: return self.data
try:
return legacy.u(self.data, encoding = encoding, force = True)
except UnicodeDecodeError:
if not safe: raise
return self.data
def extend_args(self, args):
is_dict = isinstance(args, dict)
args = args.items() if is_dict else args
for key, value in args:
_value = self.args.get(key, [])
_value.extend(value)
self.args[key] = _value
def get_encoding(self):
return self.encoding
def set_encoding(self, encoding):
self.encoding = encoding
def get_content_type(self):
return self.content_type
def set_content_type(self, content_type):
self.content_type = content_type
def default_content_type(self, default):
if self.content_type: return
if self.has_header_out("Content-Type"): return
self.content_type = default
def get_cache_control(self):
return self.cache_control
def set_cache_control(self, cache_control):
self.cache_control = cache_control
def default_cache_control(self, default):
if self.cache_control: return
if self.has_header_out("Cache-Control"): return
self.cache_control = default
def get_header(self, name, default = None, normalize = True):
if normalize: name = name.title()
return self.in_headers.get(name, default)
def set_header(self, name, value, normalize = False, replace = True):
if normalize: name = name.title()
if not replace and name in self.out_headers: return
self.out_headers[name] = legacy.ascii(value)
def ensure_header(self, name, value, normalize = False):
self.set_header(name, value, normalize = normalize, replace = False)
def set_headers(self, headers):
if not headers: return
for name, value in headers.items():
self.set_header(name, value)
def set_headers_l(self, headers_l):
for name, value in headers_l:
self.set_header(name, value)
def has_header_in(self, name, insensitive = True):
if insensitive: name = name.title()
return name in self.in_headers
def has_header_out(self, name, insensitive = True):
if insensitive: name = name.title()
return name in self.out_headers
def set_headers_b(self):
content_type = self.get_content_type() or "text/plain"
cache_control = self.get_cache_control()
self.set_header("Content-Type", content_type)
if cache_control: self.set_header("Cache-Control", cache_control)
def get_address(self, resolve = True, cleanup = True):
"""
Retrieves the client (network) address associated with the
current request/connection.
In case the resolve flag is set the resolution takes a more
        complex approach to avoid the common problems that arise when proxy
        based connections (indirection in connection) are used.
If cleanup mode is used it means that IPv4 addresses encapsulated
        in IPv6 addresses will have the prefix removed (eg: ::ffff:127.0.0.1
        will become just 127.0.0.1).
:type resolve: bool
:param resolve: If the complex resolution process for the
network address should take place.
:type cleanup: bool
:param cleanup: If the final value should have the possible
IPv6 to IPv4 address prefix removed.
:rtype: String
:return: The resolved client (network) address.
"""
if resolve:
address = self.get_header("X-Forwarded-For", self.address)
address = self.get_header("X-Client-IP", address)
address = self.get_header("X-Real-IP", address)
address = address.split(",", 1)[0].strip() if address else address
else:
address = self.address
if cleanup and address and address.startswith("::ffff:"):
address = address[7:]
return address
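    # Illustrative sketch: with resolve enabled the proxy headers take precedence,
    # so a request carrying "X-Forwarded-For: 203.0.113.7, 10.0.0.2" resolves to
    # "203.0.113.7", while a raw "::ffff:10.0.0.1" transport address is cleaned
    # up to "10.0.0.1" when cleanup is set.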
def get_host(self, resolve = True):
"""
Retrieves the hostname (as a string) associated with the
current request.
In case the resolve flag is set the resolution takes a more
        complex approach to avoid the common problems that arise when proxy
        based connections (indirection in connection) are used.
:type resolve: bool
:param resolve: If the complex resolution process for the
hostname should take place.
:rtype: String
:return: The resolved hostname string value.
"""
server_name = self.environ.get("SERVER_NAME", "localhost")
server_port = int(self.environ.get("SERVER_PORT", "80"))
server_host = "%s:%d" % (server_name, server_port)
host = self.get_header("Host", server_host)
if resolve: host = self.get_header("X-Forwarded-Host", host)
return host
def get_url(self, resolve = True):
host = self.get_host(resolve = resolve)
if not host: return
if not self.scheme: return
query_s = "?%s" % self.query if self.query else ""
return "%s://%s%s%s" % (self.scheme, host, self.path, query_s)
def resolve_params(self):
self.params = self._resolve_p(self.params)
def resolve_query_s(self, prefixes = ("x-",)):
parts = [part for part in self.query.split("&") if not part.startswith(prefixes)]
self.query_s = "&".join(parts)
self.prefixes = prefixes
def load_base(self):
self.load_data()
self.load_form()
self.load_authorization()
self.load_headers()
self.load_session()
def load_data(self):
# initializes the various structures associated with the data loading
# and/or parsing, so that the request is correctly populated
self.data_j = None
self.post = {}
self.files = {}
# verifies if the current data attribute contains a valid value in case
# it does not returns immediately as there's nothing to be loaded
if not self.data: return
# tries to retrieve the current content type value set in the environment
# then splits it around the separator to retrieve the mime type
content_type = self.environ.get("CONTENT_TYPE", "application/json")
content_type = self.environ.get("HTTP_CONTENT_TYPE", content_type)
content_type_s = content_type.split(";")
mime_type = content_type_s[0].strip()
if mime_type == "application/json":
data = self.data.decode("utf-8") if self.data else None
try: self.data_j = json.loads(data) if data else None
except Exception: pass
elif mime_type == "application/x-www-form-urlencoded":
data = legacy.str(self.data) if self.data else None
post = legacy.parse_qs(
data,
keep_blank_values = True
) if self.data else {}
post = util.decode_params(post)
self.set_post(post)
elif mime_type == "multipart/form-data":
boundary = content_type_s[1].strip()
post, files, ordered = util.parse_multipart(self.data, boundary)
self.set_multipart(post, files, ordered)
def load_form(self):
self.params_s = util.load_form(self.params)
self.post_s = util.load_form(self.post)
self.files_s = util.load_form(self.files)
def load_authorization(self):
# tries to decode the provided authorization header into it's own
# components of username and password, in case the structure of the
# provided string is not compliant an exception is raised
authorization = self.environ.get("HTTP_AUTHORIZATION", None)
if not authorization: return
parts = authorization.split(" ", 1)
if not len(parts) == 2: raise exceptions.OperationalError(
message = "Invalid authorization header"
)
_method, value = parts
value = base64.b64decode(value)
value = legacy.str(value)
parts = value.split(":", 1)
if not len(parts) == 2: raise exceptions.OperationalError(
message = "Invalid authorization header"
)
self.authorization = tuple(parts)
def load_headers(self):
for key, value in self.environ.items():
if not key.startswith("HTTP_"): continue
key = key[5:]
parts = key.split("_")
parts = [part.title() for part in parts]
key_s = "-".join(parts)
self.in_headers[key_s] = value
def load_session(self):
self.load_mock()
self.load_cookies()
self.set_alias()
self.set_session()
def load_mock(self):
self.session.address = self.get_address()
def load_cookies(self):
cookie_s = self.environ.get("HTTP_COOKIE", "")
self.cookies = util.parse_cookie(cookie_s)
def locale_s(self, value = None):
value = value or self.locale
return value.replace("_", "-").lower()
def locale_b(self, value = None):
value = value or self.locale
return value.replace("-", "_").lower()
def locale_l(self, value = None):
value = value or self.locale
return value.split("_", 1)[0]
def load_locale(self, available, fallback = "en_us", ensure = True):
# tries to gather the best locale value using the currently
# available strategies and in case the retrieved local is part
# of the valid locales for the app returns the locale, otherwise
# returns the fallback value instead (using the first available
# locale in case the ensure flag is set)
locale = self.get_locale(fallback = fallback)
locale = self.locale_b(locale)
language = self.locale_l(locale)
if locale in available: return self.set_locale(locale)
if language in available: return self.set_locale(locale)
if ensure and available: return self.set_locale(available[0])
return self.set_locale(fallback)
def get_locale(self, fallback = "en_us"):
# tries to retrieve the locale value from the provided URL
# parameters (this is the highest priority) and in case it
# exists returns this locale immediately
locale = self.params.get("locale", None)
if locale: return locale[0]
# uses the currently loaded session to try to gather the locale
# value from it and in case it's valid and exists returns it
locale = self.session.get("locale", None)
if locale: return locale
# gathers the complete set of language values set in the accept
# language header and in case there's at least one value returned
# returns the first of these values as the locale
langs = self.get_langs()
if langs: return langs[0]
# defines the locale as the global wide appier configuration and
# tries to retrieve the value of it in case it's valid uses it
locale = config.conf("LOCALE", None)
if locale: return locale
# in case this code entry is reached all the strategies for locale
# retrieval have failed and so the fallback value is returned
return fallback
def set_locale(self, locale):
# sets both the base locale value but also the language attribute
# that may be used to determine the base (less specific) language value
self.locale = locale
self.language = self.locale_l(locale)
def get_langs(self):
# gathers the value of the accept language header and in case
# it's not defined returns immediately as no language can be
# determined using the currently provided headers
accept_language = self.in_headers.get("Accept-Language", None)
if not accept_language: return ()
# starts the list that is going to be used to store the various
# languages "recovered" from the accept language header, note that
# the order of these languages should be from the most relevant to
# the least relevant as defined in HTTP specification
langs = []
# splits the accept language header into the various components of
# it and then iterates over each of them splitting each of the
# components into the proper language string and priority
parts = accept_language.split(",")
for part in parts:
values = part.split(";", 1)
value_l = len(values)
if value_l == 1: lang, = values
else: lang, _priority = values
lang = lang.replace("-", "_")
lang = lang.lower()
langs.append(lang)
# returns the complete list of languages that have been extracted
# from the accept language header these list may be empty in case
# the header was not parsed correctly or there's no contents in it
return langs
def set_alias(self):
for alias in Request.ALIAS:
if not alias in self.params: continue
self.params["sid"] = self.params[alias]
for alias in Request.ALIAS:
if not alias in self.post: continue
self.post["sid"] = self.post[alias]
for alias in Request.ALIAS:
if not alias in self.args: continue
self.args["sid"] = self.args[alias]
def set_session(self, create = False):
# tries to retrieves the session id (SID) from all the
# possible sources so that something may be used in the
# identification of the current request
sid = self.cookies.get("sid", None)
sid = self.post.get("sid", (None,))[0] or sid
sid = self.params.get("sid", (None,))[0] or sid
# in case the data type of the currently provided session
# identifier is not unicode based converts it into a string
# so that it may be used to correctly retrieve the associated
# session object from the underlying session class repository
sid = str(sid) if type(sid) == legacy.UNICODE else sid
# tries to retrieve the session reference for the
# provided SID (session id) in case there's an exception
# defaults to unset session so that a new one gets created
try: session = self.session_c.get_s(sid, request = self)
except Exception: session = None
# in case no valid session exists a new one must be created
# so that the user may be able to interact with the system
# with some kind of memory/persistence otherwise sets the
# loaded session in the current request (to be used)
if session: self.session = session
elif create: self.session = self.session_c.new(address = self.address)
def get_session(self):
return self.session
def get_session_agent(self):
if not self.session: return None
if not self.session.is_loaded(): return None
username = self.session.get("username", None)
if username: return username
email = self.session.get("email", None)
if email: return email
id = self.session.get("id", None)
if id: return id
return None
def get_warnings(self):
return self.warnings
def get_set_cookie(self, lang = "en", path = "/", delta = 31536000):
base = self.set_cookie
if not base: return base
expires = time.time() + delta
expires_d = datetime.datetime.fromtimestamp(expires)
        with util.ctx_locale(): expires_s = expires_d.strftime("%a, %d %b %Y %H:%M:%S GMT")
set_cookie = "%s%s;lang=%s;path=%s;expires=%s;%s" % (
self.set_cookie_prefix or self.owner.set_cookie_prefix or "",
base,
lang,
path,
expires_s,
self.set_cookie_suffix or self.owner.set_cookie_suffix or "",
)
return set_cookie
def get_headers(self):
return self.get_out_headers()
def get_in_headers(self):
headers = self.in_headers.items()
return legacy.eager(headers)
def get_out_headers(self):
headers = self.out_headers.items()
return legacy.eager(headers)
def get_code_s(self):
code_s = CODE_STRINGS.get(self.code, "Unknown")
code_s = str(self.code) + " " + code_s
return code_s
def get_sdate(self, format = "%d/%b/%Y:%H:%M:%S +0000"):
sdate = datetime.datetime.utcfromtimestamp(self.stime)
return sdate.strftime(format)
def get_edate(self, format = "%d/%b/%Y:%H:%M:%S +0000"):
edate = datetime.datetime.utcfromtimestamp(self.etime)
return edate.strftime(format)
def is_mobile(self):
user_agent = self.get_header("User-Agent")
return util.is_mobile(user_agent)
def is_tablet(self):
user_agent = self.get_header("User-Agent")
return util.is_tablet(user_agent)
def is_browser(self):
user_agent = self.get_header("User-Agent")
return util.is_browser(user_agent)
def is_bot(self):
user_agent = self.get_header("User-Agent")
return util.is_bot(user_agent)
def is_success(self):
return self.code == 200
def is_empty(self):
return self.code in (204, 304)
def is_mock(self):
return False
@property
def location_f(self, safe = True):
query = self.query_s if safe else self.query
if not query: return self.location
return self.location + "?" + query
@property
def params_f(self, safe = True):
if not safe: return self.params_s
if hasattr(self, "_params_f"): return self._params_f
self._params_f = dict([(key, value) for key, value in self.params_s.items() if\
not key.startswith(self.prefixes)])
return self._params_f
@property
def duration(self, milliseconds = True, safe = True):
if not self.stime: return None
if not self.etime and not safe: return None
etime = self.etime or time.time()
duration = etime - self.stime
if not milliseconds: return duration
return duration * 1000.0
@property
def in_length(self):
data = self.get_data()
if not data: return 0
return len(data)
@property
def out_length(self, safe = True):
return self.result_l or 0
@property
def asynchronous(self):
return True if self.get_header("X-Async") else False
@property
def partial(self):
return True if self.get_header("X-Partial") else False
@property
def browser_info(self):
user_agent = self.get_header("User-Agent")
return util.browser_info(user_agent)
def _resolve_p(self, params):
secret = self.session.get("secret", None)
if not secret: return params
raise exceptions.AppierException(message = "Not implemented")
class MockRequest(Request):
"""
    Mock request class, meant to be used in situations where no
    web oriented request is available to be retrieved or where the
    logic is running outside of a web request.
It's recommended to keep only one instance of a mock request
per each execution life-cycle.
"""
def __init__(self, locale = "en_us", *args, **kwargs):
Request.__init__(self, *args, **kwargs)
self.files_s = dict()
self.post_s = dict()
self.params_s = dict()
self.set_locale(locale)
def close(self):
pass
def is_mock(self):
return True
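# Illustrative, standalone sketch (not part of the original module): it mirrors
# the proxy aware address resolution documented in Request.get_address() and
# the "Accept-Language" parsing in Request.get_langs() above; the header values
# used here are hypothetical.
def _resolution_sketch():
    headers = {
        "X-Forwarded-For": "::ffff:10.0.0.1, 172.16.0.7",
        "Accept-Language": "pt-PT,pt;q=0.9,en;q=0.8"
    }
    # the forwarding header takes precedence, only the first (client) hop is
    # kept and the IPv4 mapped IPv6 prefix is stripped, as in get_address()
    address = headers.get("X-Forwarded-For", "127.0.0.1").split(",", 1)[0].strip()
    if address.startswith("::ffff:"): address = address[7:]
    # languages are normalized to the "xx_yy" form in priority order, as in
    # get_langs()
    langs = []
    for part in headers.get("Accept-Language", "").split(","):
        lang = part.split(";", 1)[0].strip().replace("-", "_").lower()
        if lang: langs.append(lang)
    return address, langs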
|
setup.py | jmoon1506/python-sonic | 263 | 11138896 | from setuptools import setup
# To use a consistent encoding
# python setup.py sdist bdist_wheel
# python -m twine upload dist/*
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
requirements = [
'python-osc',
]
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest',
]
setup(
name='python-sonic',
version='0.4.3',
description='Programming Music with Sonic Pi or Supercollider',
long_description=long_description,
url='https://github.com/gkvoelkl/python-sonic',
author='gkvoelkl',
author_email='<EMAIL>',
packages=[
'psonic',
'psonic.samples',
'psonic.samples.loops',
'psonic.internals',
],
license='MIT',
zip_safe=False,
include_package_data=True,
install_requires=requirements,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Multimedia :: Sound/Audio',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
keywords= [
'music',
'sonic pi',
'raspberry pi',
'audio',
'music composition',
'scsynth',
'supercollider',
'synthesis'
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
|
tests/autocast_test.py | gglin001/poptorch | 128 | 11138945 | #!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import pytest
import torch
import poptorch
import helpers
@pytest.mark.parametrize("setting", {"default", "true", "false"})
@helpers.printCapfdOnExit
@helpers.overridePoptorchLogLevel("TRACE")
def test_autocast_decorator(capfd, setting):
class ModelDefault(torch.nn.Module):
@poptorch.autocast()
def forward(self, x, y):
return torch.bmm(x, y)
class ModelTrue(torch.nn.Module):
@poptorch.autocast(True)
def forward(self, x, y):
return torch.bmm(x, y)
class ModelFalse(torch.nn.Module):
@poptorch.autocast(False)
def forward(self, x, y):
return torch.bmm(x, y)
if setting == "true":
model = ModelTrue()
elif setting == "false":
model = ModelFalse()
else:
model = ModelDefault()
torch.manual_seed(42)
x = torch.randn(1, 20, 20)
y = torch.randn(1, 20, 20)
poptorch_model = poptorch.inferenceModel(model)
poptorch_model(x, y)
testlog = helpers.LogChecker(capfd)
test_type = "Float" if setting == "false" else "Half"
testlog.assert_contains(
f"{test_type}(1, 20, 20, strides=[400, 20, 1], requires_grad=0,"
" device=cpu) = popart::matmul")
@pytest.mark.parametrize("setting", {"default", "true", "false"})
@helpers.printCapfdOnExit
@helpers.overridePoptorchLogLevel("TRACE")
def test_autocast_block(capfd, setting):
class ModelDefault(torch.nn.Module):
def forward(self, x, y):
with poptorch.autocast():
return torch.bmm(x, y)
class ModelTrue(torch.nn.Module):
def forward(self, x, y):
with poptorch.autocast(True):
return torch.bmm(x, y)
class ModelFalse(torch.nn.Module):
def forward(self, x, y):
with poptorch.autocast(False):
return torch.bmm(x, y)
if setting == "true":
model = ModelTrue()
elif setting == "false":
model = ModelFalse()
else:
model = ModelDefault()
torch.manual_seed(42)
x = torch.randn(1, 20, 20)
y = torch.randn(1, 20, 20)
poptorch_model = poptorch.inferenceModel(model)
poptorch_model(x, y)
testlog = helpers.LogChecker(capfd)
test_type = "Float" if setting == "false" else "Half"
testlog.assert_contains(
f"{test_type}(1, 20, 20, strides=[400, 20, 1], requires_grad=0,"
" device=cpu) = popart::matmul")
@pytest.mark.parametrize("setting", {"default", "true", "false"})
@helpers.printCapfdOnExit
@helpers.overridePoptorchLogLevel("TRACE")
def test_enable_autocast(capfd, setting):
torch.manual_seed(42)
x = torch.randn(1, 1, 20, 20)
model = torch.nn.Conv2d(1, 20, 5)
model.autocast()
opts = poptorch.Options()
if setting == "true":
opts.Precision.autocastEnabled(True)
elif setting == "false":
opts.Precision.autocastEnabled(False)
poptorch_model = poptorch.inferenceModel(model, opts)
poptorch_model(x)
testlog = helpers.LogChecker(capfd)
test_type = "Float" if setting == "false" else "Half"
testlog.assert_contains(
f"{test_type}(1, 20, 16, 16, strides=[5120, 256, 16, 1],"
" requires_grad=1, device=cpu) = popart::conv")
@pytest.mark.parametrize("setting", {"hff", "hfh", "hhf", "default"})
@helpers.printCapfdOnExit
@helpers.overridePoptorchLogLevel("TRACE")
def test_autocast_policy(capfd, setting):
class PolicyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 20, 20)
def forward(self, x):
return torch.relu(0.5 * self.conv.forward(x))
torch.manual_seed(42)
x = torch.randn(1, 1, 20, 20)
model = PolicyModel()
model.autocast()
if setting == "hff":
policy = poptorch.autocasting.Policy([torch.nn.Conv2d],
[torch.mul, torch.relu])
elif setting == "hfh":
policy = poptorch.autocasting.Policy([torch.nn.Conv2d, torch.relu],
[torch.mul])
elif setting == "hhf":
policy = poptorch.autocasting.Policy([torch.mul],
[torch.nn.Conv2d, torch.relu])
else:
policy = poptorch.autocasting.Policy()
opts = poptorch.Options()
opts.Precision.autocastPolicy(policy)
poptorch_model = poptorch.inferenceModel(model, opts)
poptorch_model(x)
testlog = helpers.LogChecker(capfd)
test_ops = ["conv", "mul", "relu"]
test_types = []
if setting == "default":
test_types = ["Half", "Half", "Half"]
else:
for c in setting:
if c == "f":
test_types.append("Float")
elif c == "h":
test_types.append("Half")
for i, op in enumerate(test_ops):
testlog.assert_contains(
f"{test_types[i]}(1, 1, 1, 1, strides=[1, 1, 1, 1],"
f" requires_grad=1, device=cpu) = popart::{op}")
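# Minimal usage sketch (illustrative only): it mirrors the patterns exercised
# by the tests above, enabling automatic casting on a module and compiling it
# for inference; the layer sizes and input shape are hypothetical.
def _autocast_usage_sketch():
    model = torch.nn.Linear(16, 4)
    model.autocast()  # per-module opt-in, as in test_enable_autocast
    opts = poptorch.Options()
    opts.Precision.autocastEnabled(True)
    poptorch_model = poptorch.inferenceModel(model, opts)
    return poptorch_model(torch.randn(2, 16))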
|
stix_shifter_modules/trendmicro_vision_one/test/stix_translation/test_result_translator.py | pyromaneact/stix-shifter | 129 | 11138967 | # -*- coding: utf-8 -*-
import json
import unittest
import uuid
from datetime import datetime
from stix_shifter_utils.stix_translation.src.utils.exceptions import LoadJsonResultsException, TranslationResultException
from stix_shifter_modules.trendmicro_vision_one.entry_point import EntryPoint
class TestResultTranslatorMixin:
@staticmethod
def get_dialect():
raise NotImplementedError()
@property
def data_source(self):
now = "{}Z".format(datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3])
return {
"id": "identity--" + str(uuid.uuid4()),
"name": "name",
"type": "identity",
"identity_class": "individual",
"created": now,
"modified": now
}
@staticmethod
def _find_by_type(objects, obj_type):
return next((obj for obj in objects if obj["type"] == obj_type), None)
@staticmethod
def _find_object_by_type(objects: dict, obj_type):
return next(((key, obj) for key, obj in objects.items() if obj["type"] == obj_type), None)
@staticmethod
def _find_objects_by_type(objects: dict, obj_type):
return [(key, obj) for key, obj in objects.items() if obj["type"] == obj_type]
@staticmethod
def _find_object(objects: dict, obj_type, obj_value):
return next(((key, obj) for key, obj in objects.items() if obj["type"] == obj_type and obj["value"] == obj_value), None)
def _get_observed_objects(self, data):
objects = self._get_objects(data)
ob_data = self._find_by_type(objects, "observed-data")
return ob_data["objects"]
def translate_results(self, data):
result = self.results_translator.translate_results(json.dumps(self.data_source), json.dumps(data))
return result
def _get_objects(self, data):
result = self.translate_results(data)
objects = result["objects"]
return objects
class TestEndpointResultTranslator(unittest.TestCase, TestResultTranslatorMixin):
def __init__(self, methodName: str = ...) -> None:
super().__init__(methodName)
self.results_translator = EntryPoint().get_results_translator(self.get_dialect())
@staticmethod
def get_dialect():
return "endpointActivityData"
def test_event_time(self):
data = [{
"eventTime": 1602729658866,
}]
objects = self._get_objects(data)
ob_data = self._find_by_type(objects, "observed-data")
self.assertTrue(ob_data, "No observed-data")
first_observed = ob_data["first_observed"]
last_observed = ob_data["last_observed"]
self.assertEqual(first_observed, "2020-10-15T02:40:58.866Z")
self.assertEqual(last_observed, "2020-10-15T02:40:58.866Z")
def test_dst(self):
data = [{
"dst": "1.1.1.1",
}]
observed_objects = self._get_observed_objects(data)
ip_key, ip_value = self._find_object_by_type(observed_objects, "ipv4-addr")
self.assertEqual(ip_value["value"], data[0]["dst"])
nt_key, nt_value = self._find_object_by_type(observed_objects, "network-traffic")
self.assertEqual(nt_value["dst_ref"], ip_key)
def test_dpt(self):
data = [{
"dpt": 88,
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "network-traffic")
self.assertEqual(value["dst_port"], data[0]["dpt"])
def test_src(self):
data = [{
"src": "2.2.2.2",
}]
observed_objects = self._get_observed_objects(data)
ip_key, ip_value = self._find_object_by_type(observed_objects, "ipv4-addr")
self.assertEqual(ip_value["value"], data[0]["src"])
nt_key, nt_value = self._find_object_by_type(observed_objects, "network-traffic")
self.assertEqual(nt_value["src_ref"], ip_key)
def test_spt(self):
data = [{
"spt": 99,
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "network-traffic")
self.assertEqual(value["src_port"], data[0]["spt"])
def test_network_traffic(self):
data = [{
"dst": "1.1.1.1",
"dpt": 88,
"src": "2.2.2.2",
"spt": 99,
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "network-traffic")
src_ip_key, src_ip_value = self._find_object(observed_objects, "ipv4-addr", data[0]["src"])
dst_ip_key, dst_ip_value = self._find_object(observed_objects, "ipv4-addr", data[0]["dst"])
self.assertEqual(value["src_ref"], src_ip_key)
self.assertEqual(value["src_port"], data[0]["spt"])
self.assertEqual(value["dst_ref"], dst_ip_key)
self.assertEqual(value["dst_port"], data[0]["dpt"])
def test_endpoint_ip(self):
data = [{
"endpointIp": [
"10.10.58.51",
"127.0.0.1",
"fe80::f8e9:b28:a7a5:4b89",
"::1",
]
}]
observed_objects = self._get_observed_objects(data)
self.assertTrue(self._find_object(observed_objects, "ipv4-addr", data[0]["endpointIp"][0]))
self.assertTrue(self._find_object(observed_objects, "ipv4-addr", data[0]["endpointIp"][1]))
self.assertTrue(self._find_object(observed_objects, "ipv6-addr", data[0]["endpointIp"][2]))
self.assertTrue(self._find_object(observed_objects, "ipv6-addr", data[0]["endpointIp"][3]))
def test_object_ip(self):
data = [{
"objectIp": "9.9.9.9",
}]
observed_objects = self._get_observed_objects(data)
self.assertTrue(self._find_object(observed_objects, "ipv4-addr", data[0]["objectIp"]))
def test_object_port(self):
data = [{
"objectPort": 99,
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "network-traffic")
self.assertEqual(value["dst_port"], data[0]["objectPort"])
def test_object_network_traffic(self):
data = [{
"objectIp": "9.9.9.9",
"objectPort": 999,
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "network-traffic")
dst_ip_key, dst_ip_value = self._find_object(observed_objects, "ipv4-addr", data[0]["objectIp"])
self.assertEqual(value["dst_ref"], dst_ip_key)
self.assertEqual(value["dst_port"], data[0]["objectPort"])
def test_object_ips(self):
data = [{
"objectIps": [
"10.10.58.51",
"127.0.0.1",
"fe80::f8e9:b28:a7a5:4b89",
"::1",
]
}]
observed_objects = self._get_observed_objects(data)
self.assertTrue(self._find_object(observed_objects, "ipv4-addr", data[0]["objectIps"][0]))
self.assertTrue(self._find_object(observed_objects, "ipv4-addr", data[0]["objectIps"][1]))
self.assertTrue(self._find_object(observed_objects, "ipv6-addr", data[0]["objectIps"][2]))
self.assertTrue(self._find_object(observed_objects, "ipv6-addr", data[0]["objectIps"][3]))
def test_logon_user(self):
data = [{
"logonUser": "Admin",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "user-account")
self.assertEqual(value["account_login"], data[0]["logonUser"])
def test_object_user(self):
data = [{
"objectUser": "1001",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "user-account")
self.assertEqual(value["user_id"], data[0]["objectUser"])
def test_user_account(self):
data = [{
"logonUser": "Admin",
"objectUser": "1001",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "user-account")
self.assertEqual(value["account_login"], data[0]["logonUser"])
self.assertEqual(value["user_id"], data[0]["objectUser"])
def test_host_name(self):
data = [{
"hostName": "aaa.bbb.ccc",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "domain-name")
self.assertEqual(value["value"], data[0]["hostName"])
def test_object_host_name(self):
data = [{
"objectHostName": "aaa.bbb.ccc",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "domain-name")
self.assertEqual(value["value"], data[0]["objectHostName"])
def test_object_cmd(self):
data = [{
"objectCmd": "c:\\bin\\object_command.bat",
"objectFileHashSha1": "999404bd02d9752f406f7440567daf2495870e14",
"objectFilePath": "c:\\users\\debbie\\appdata\\local\\microsoft\\onedrive\\19.192.0926.0012\\filesync.localizedresources.dll",
}]
observed_objects = self._get_observed_objects(data)
dir_key, dir_value = self._find_object_by_type(observed_objects, "directory")
self.assertEqual(dir_value["path"], "c:\\users\\debbie\\appdata\\local\\microsoft\\onedrive\\19.192.0926.0012")
file_key, file_value = self._find_object_by_type(observed_objects, "file")
self.assertEqual(file_value["name"], "filesync.localizedresources.dll")
self.assertEqual(file_value["parent_directory_ref"], dir_key)
self.assertEqual(file_value["hashes"]["SHA-1"], data[0]["objectFileHashSha1"])
key, value = self._find_object_by_type(observed_objects, "process")
self.assertEqual(value["command_line"], data[0]["objectCmd"])
self.assertEqual(value["binary_ref"], file_key)
def test_object_registry(self):
data = [{
"objectRegistryData": "4359",
"objectRegistryKeyHandle": "hkcu\\software\\microsoft\\internet explorer\\domstorage\\office.com",
"objectRegistryValue": "total",
}]
observed_objects = self._get_observed_objects(data)
key_key, key_value = self._find_object_by_type(observed_objects, "windows-registry-key")
self.assertEqual(key_value["key"], r"HKEY_CURRENT_USER\software\microsoft\internet explorer\domstorage\office.com")
self.assertDictEqual(key_value["values"][0], {'name': data[0]["objectRegistryValue"], 'data': data[0]["objectRegistryData"]})
def test_process_cmd(self):
data = [{
"processCmd": "c:\\program files (x86)\\internet explorer\\iexplore.exe scodef:14872 credat:17410 /prefetch:2",
"processFileHashSha1": "0b603b11d39ffaf773bf71df3fe854cd652c1a05",
"processFilePath": "c:\\program files (x86)\\internet explorer\\iexplore.exe",
}]
observed_objects = self._get_observed_objects(data)
dir_key, dir_value = self._find_object_by_type(observed_objects, "directory")
self.assertEqual(dir_value["path"], "c:\\program files (x86)\\internet explorer")
file_key, file_value = self._find_object_by_type(observed_objects, "file")
self.assertEqual(file_value["name"], "iexplore.exe")
self.assertEqual(file_value["parent_directory_ref"], dir_key)
self.assertEqual(file_value["hashes"]["SHA-1"], data[0]["processFileHashSha1"])
key, value = self._find_object_by_type(observed_objects, "process")
self.assertEqual(value["command_line"], data[0]["processCmd"])
self.assertEqual(value["binary_ref"], file_key)
def test_request(self):
data = [{
"request": "https://aaa.bbb.ccc",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "url")
self.assertEqual(value["value"], data[0]["request"])
def test_parent_cmd(self):
data = [{
"parentCmd": "c:\\program files\\internet explorer\\iexplore.exe",
"parentFileHashSha1": "b1a662c917938e4071d0999e2a942a42d8e2395f",
"parentFilePath": "c:\\program files\\internet explorer\\iexplore.exe",
}]
observed_objects = self._get_observed_objects(data)
dir_key, dir_value = self._find_object_by_type(observed_objects, "directory")
self.assertEqual(dir_value["path"], "c:\\program files\\internet explorer")
file_key, file_value = self._find_object_by_type(observed_objects, "file")
self.assertEqual(file_value["name"], "iexplore.exe")
self.assertEqual(file_value["parent_directory_ref"], dir_key)
self.assertEqual(file_value["hashes"]["SHA-1"], data[0]["parentFileHashSha1"])
key, value = self._find_object_by_type(observed_objects, "process")
self.assertEqual(value["command_line"], data[0]["parentCmd"])
self.assertEqual(value["binary_ref"], file_key)
def test_src_file(self):
data = [{
"srcFileHashSha1": "999404bd02d9752f406f7440567daf2495870e14",
"srcFilePath": "/src_aaa/bbb/ccc.dat",
}]
observed_objects = self._get_observed_objects(data)
dir_key, dir_value = self._find_object_by_type(observed_objects, "directory")
self.assertEqual(dir_value["path"], "/src_aaa/bbb")
file_key, file_value = self._find_object_by_type(observed_objects, "file")
self.assertEqual(file_value["name"], "ccc.dat")
self.assertEqual(file_value["parent_directory_ref"], dir_key)
self.assertEqual(file_value["hashes"]["SHA-1"], data[0]["srcFileHashSha1"])
def test_malformed_payload(self):
self.assertRaises(LoadJsonResultsException, self.translate_results, "test")
def test_missing_id(self):
self.assertRaises(TranslationResultException, self.results_translator.translate_results, "{}", "{}")
class TestMessageResultTranslator(unittest.TestCase, TestResultTranslatorMixin):
def __init__(self, methodName: str = ...) -> None:
super().__init__(methodName)
self.results_translator = EntryPoint().get_results_translator(self.get_dialect())
@staticmethod
def get_dialect():
return "messageActivityData"
def test_sender(self):
data = [{
"mail_message_sender": "<EMAIL>",
}]
observed_objects = self._get_observed_objects(data)
addr_key, addr_value = self._find_object_by_type(observed_objects, "email-addr")
self.assertEqual(addr_value["value"], data[0]["mail_message_sender"])
mail_key, mail_value = self._find_object_by_type(observed_objects, "email-message")
self.assertEqual(mail_value["sender_ref"], addr_key)
def test_recipient(self):
data = [{
"mail_message_recipient": [
"<EMAIL>",
"<EMAIL>"
],
}]
observed_objects = self._get_observed_objects(data)
values_1 = self._find_object(observed_objects, "email-addr", "<EMAIL>")
self.assertTrue(values_1)
value_2 = self._find_object(observed_objects, "email-addr", "<EMAIL>")
self.assertTrue(value_2)
mail_key, mail_value = self._find_object_by_type(observed_objects, "email-message")
self.assertSetEqual(set(mail_value["to_refs"]), {values_1[0], value_2[0]})
def test_subject(self):
data = [{
"mail_message_subject": "Message Center Major Change Update Notification",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "email-message")
self.assertEqual(value["subject"], data[0]["mail_message_subject"])
def test_delivery_time(self):
data = [{
"mail_message_delivery_time": "2021-04-13T08:30:56.000Z",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "email-message")
self.assertEqual(value["date"], data[0]["mail_message_delivery_time"])
def test_headers(self):
data = [{
"mail_message_id": "<<EMAIL>>",
"mail_internet_headers": [
{
"Value": "<EMAIL>@bbb.ccc.ddd",
"HeaderName": "Return-Path"
},
{
"Value": "spf=pass (sender IP is 192.168.127.12); compauth=pass reason=100",
"HeaderName": "Authentication-Results"
}
],
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "email-message")
self.assertDictEqual(value["additional_header_fields"], {
data[0]["mail_internet_headers"][0]["HeaderName"]: data[0]["mail_internet_headers"][0]["Value"],
data[0]["mail_internet_headers"][1]["HeaderName"]: data[0]["mail_internet_headers"][1]["Value"],
"Message-ID": data[0]["mail_message_id"],
})
def test_message_id(self):
data = [{
"mail_message_id": "<<EMAIL>>",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "email-message")
self.assertDictEqual(value["additional_header_fields"], {
"Message-ID": data[0]["mail_message_id"],
})
def test_email_message(self):
data = [{
"mail_message_sender": "<EMAIL>",
"mail_message_recipient": [
"<EMAIL>",
"<EMAIL>"
],
"mail_message_subject": "Message Center Major Change Update Notification",
"mail_message_delivery_time": "2021-04-13T08:30:56.000Z",
"mail_message_id": "<<EMAIL>>",
"mail_internet_headers": [
{
"Value": "<EMAIL>",
"HeaderName": "Return-Path"
},
{
"Value": "spf=pass (sender IP is 192.168.127.12); compauth=pass reason=100",
"HeaderName": "Authentication-Results"
}
],
}]
observed_objects = self._get_observed_objects(data)
sender_addr_key, sender_addr_value = self._find_object(observed_objects, "email-addr", "<EMAIL>")
self.assertEqual(sender_addr_value["value"], data[0]["mail_message_sender"])
values_1 = self._find_object(observed_objects, "email-addr", "<EMAIL>")
self.assertTrue(values_1)
value_2 = self._find_object(observed_objects, "email-addr", "<EMAIL>")
self.assertTrue(value_2)
mail_key, mail_value = self._find_object_by_type(observed_objects, "email-message")
self.assertEqual(mail_value["sender_ref"], sender_addr_key)
self.assertSetEqual(set(mail_value["to_refs"]), {values_1[0], value_2[0]})
self.assertEqual(mail_value["subject"], data[0]["mail_message_subject"])
self.assertEqual(mail_value["date"], data[0]["mail_message_delivery_time"])
self.assertDictEqual(mail_value["additional_header_fields"], {
data[0]["mail_internet_headers"][0]["HeaderName"]: data[0]["mail_internet_headers"][0]["Value"],
data[0]["mail_internet_headers"][1]["HeaderName"]: data[0]["mail_internet_headers"][1]["Value"],
"Message-ID": data[0]["mail_message_id"],
})
def test_mail_urls(self):
data = [{
"mail_urls": [
"https://aaa.bbb.ccc/ddd",
"https://bbb.ccc.ddd/eee"
],
}]
observed_objects = self._get_observed_objects(data)
urls = {value[1]["value"] for value in self._find_objects_by_type(observed_objects, "url")}
self.assertSetEqual(urls, set(data[0]["mail_urls"]))
def test_source_domain(self):
data = [{
"source_domain": "aaa.bbb.ccc",
}]
observed_objects = self._get_observed_objects(data)
key, value = self._find_object_by_type(observed_objects, "domain-name")
self.assertEqual(value["value"], data[0]["source_domain"])
def test_source_ip(self):
data = [{
"source_ip": "192.168.127.12",
}]
observed_objects = self._get_observed_objects(data)
ip_key, ip_value = self._find_object_by_type(observed_objects, "ipv4-addr")
self.assertEqual(ip_value["value"], data[0]["source_ip"])
nt_key, nt_value = self._find_object_by_type(observed_objects, "network-traffic")
self.assertEqual(nt_value["src_ref"], ip_key)
def test_attachments(self):
data = [{
"mail_attachments": [
{
"file_name": "test111.txt"
},
{
"file_sha1": "46932d56cd30feda77c878e2f72432fce2736918",
},
{
"file_sha1": "823c3ee108cbfbf27c39361682592f83cdd8ad24",
"file_name": "tes333.txt"
}
],
}]
observed_objects = self._get_observed_objects(data)
file_obj = [value[1] for value in self._find_objects_by_type(observed_objects, "file")]
file_item = file_obj[0]
self.assertEqual(file_item["name"], data[0]["mail_attachments"][0].get("file_name", ""))
file_item = file_obj[1]
self.assertEqual(file_item["hashes"]["SHA-1"], data[0]["mail_attachments"][1]["file_sha1"])
file_item = file_obj[2]
self.assertEqual(file_item["name"], data[0]["mail_attachments"][2].get("file_name", ""))
self.assertEqual(file_item["hashes"]["SHA-1"], data[0]["mail_attachments"][2]["file_sha1"])
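# Illustrative sketch (not part of the test suite): this is how the dialect
# translators exercised above are obtained and invoked directly; the identity
# object and the single result row below are hypothetical minimal examples.
def _translate_results_sketch():
    now = "{}Z".format(datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3])
    data_source = {
        "id": "identity--" + str(uuid.uuid4()),
        "name": "name",
        "type": "identity",
        "identity_class": "individual",
        "created": now,
        "modified": now
    }
    rows = [{"dst": "1.1.1.1", "dpt": 88, "src": "2.2.2.2", "spt": 99}]
    translator = EntryPoint().get_results_translator("endpointActivityData")
    # returns a bundle-like dict whose "objects" list contains the identity
    # plus an observed-data entry built from the row above
    return translator.translate_results(json.dumps(data_source), json.dumps(rows))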
|
loopchain/blockchain/transactions/v3_issue/__init__.py | windies21/loopchain | 105 | 11138968 | from .transaction import Transaction, HASH_SALT
from .transaction_serializer import TransactionSerializer
from .transaction_verifier import TransactionVerifier
version = Transaction.version
|
app/server/labml_app/analyses/computers/disk.py | vishalbelsare/labml | 463 | 11138973 | from typing import Dict, Any
from fastapi import Request
from fastapi.responses import JSONResponse
from labml_db import Model, Index
from labml_db.serializer.pickle import PickleSerializer
from labml_app.logger import logger
from labml_app.enums import COMPUTEREnums
from ..analysis import Analysis
from ..series import SeriesModel, Series
from ..series_collection import SeriesCollection
from ..preferences import Preferences
from .. import helper
@Analysis.db_model(PickleSerializer, 'Disk')
class DiskModel(Model['DiskModel'], SeriesCollection):
pass
@Analysis.db_index(PickleSerializer, 'disk_index')
class DiskIndex(Index['Disk']):
pass
@Analysis.db_model(PickleSerializer, 'disk_preferences')
class DiskPreferencesModel(Model['DiskPreferencesModel'], Preferences):
pass
@Analysis.db_index(PickleSerializer, 'disk_preferences_index')
class DiskPreferencesIndex(Index['DiskPreferences']):
pass
class DiskAnalysis(Analysis):
disk: DiskModel
def __init__(self, data):
self.disk = data
def track(self, data: Dict[str, SeriesModel]):
res: Dict[str, SeriesModel] = {}
for ind, s in data.items():
ind_type = ind.split('.')[0]
if ind_type == COMPUTEREnums.DISK:
res[ind] = s
self.disk.track(res)
def get_tracking(self):
res = []
for ind, track in self.disk.tracking.items():
name = ind.split('.')
if any(x in ['total'] for x in name):
continue
series: Dict[str, Any] = Series().load(track).detail
series['name'] = '.'.join(name)
res.append(series)
res.sort(key=lambda s: s['name'])
helper.remove_common_prefix(res, 'name')
return res
@staticmethod
def get_or_create(session_uuid: str):
disk_key = DiskIndex.get(session_uuid)
if not disk_key:
d = DiskModel()
d.save()
DiskIndex.set(session_uuid, d.key)
dp = DiskPreferencesModel()
dp.save()
DiskPreferencesIndex.set(session_uuid, dp.key)
return DiskAnalysis(d)
return DiskAnalysis(disk_key.load())
@staticmethod
def delete(session_uuid: str):
disk_key = DiskIndex.get(session_uuid)
preferences_key = DiskPreferencesIndex.get(session_uuid)
if disk_key:
d: DiskModel = disk_key.load()
DiskIndex.delete(session_uuid)
d.delete()
if preferences_key:
dp: DiskPreferencesModel = preferences_key.load()
DiskPreferencesIndex.delete(session_uuid)
dp.delete()
@Analysis.route('GET', 'disk/{session_uuid}')
async def get_disk_tracking(request: Request, session_uuid: str) -> Any:
track_data = []
status_code = 404
ans = DiskAnalysis.get_or_create(session_uuid)
if ans:
track_data = ans.get_tracking()
status_code = 200
response = JSONResponse({'series': track_data, 'insights': []})
response.status_code = status_code
return response
@Analysis.route('GET', 'disk/preferences/{session_uuid}')
async def get_disk_preferences(request: Request, session_uuid: str) -> Any:
preferences_data = {}
preferences_key = DiskPreferencesIndex.get(session_uuid)
if not preferences_key:
return preferences_data
dp: DiskPreferencesModel = preferences_key.load()
return dp.get_data()
@Analysis.route('POST', 'disk/preferences/{session_uuid}')
async def set_disk_preferences(request: Request, session_uuid: str) -> Any:
preferences_key = DiskPreferencesIndex.get(session_uuid)
if not preferences_key:
return {}
dp = preferences_key.load()
json = await request.json()
dp.update_preferences(json)
logger.debug(f'update disk preferences: {dp.key}')
return {'errors': dp.errors}
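# Illustrative sketch (not used by the routes above) of the indicator filtering
# performed in DiskAnalysis.track(): only series whose name is prefixed with
# the disk enum value are kept, everything else is ignored.
def _filter_disk_indicators(data):
    return {ind: s for ind, s in data.items() if ind.split('.')[0] == COMPUTEREnums.DISK}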
|
testing/toy_model.py | chenw23/open_lth | 509 | 11138999 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from models.base import Model
class InnerProductModel(Model):
@staticmethod
def default_hparams(): raise NotImplementedError
@staticmethod
def is_valid_model_name(model_name): raise NotImplementedError
@staticmethod
def get_model_from_name(model_name): raise NotImplementedError
@property
def output_layer_names(self): raise NotImplementedError
@property
def loss_criterion(self): return torch.nn.MSELoss()
def __init__(self, n):
super(Model, self).__init__()
self.layer = torch.nn.Linear(n, 1, bias=False)
self.layer.weight.data = torch.arange(n, dtype=torch.float32)
def forward(self, x):
return self.layer(x)
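# Hedged usage sketch (illustrative, not part of the original file): with n=3
# the fixed weights are [0., 1., 2.], so the forward pass computes the inner
# product of the input with that vector; an all-ones input is expected to
# yield 3.0.
if __name__ == "__main__":
    model = InnerProductModel(3)
    output = model(torch.ones(3))
    print(float(output))  # expected: 3.0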
|
tests/test_log_levels.py | rohankumardubey/structlog | 1,751 | 11139006 | # SPDX-License-Identifier: MIT OR Apache-2.0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the MIT License. See the LICENSE file in the root of this
# repository for complete details.
import logging
import pickle
import pytest
from structlog import make_filtering_bound_logger
from structlog._log_levels import _LEVEL_TO_NAME
from structlog.testing import CapturingLogger
@pytest.fixture(name="cl")
def fixture_cl():
return CapturingLogger()
@pytest.fixture(name="bl")
def fixture_bl(cl):
return make_filtering_bound_logger(logging.INFO)(cl, [], {})
class TestFilteringLogger:
def test_exact_level(self, bl, cl):
"""
if log level is exactly the min_level, log.
"""
bl.info("yep")
assert [("info", (), {"event": "yep"})] == cl.calls
def test_one_below(self, bl, cl):
"""
        if log level is one below the min_level, don't log.
"""
bl.debug("nope")
assert [] == cl.calls
def test_exception(self, bl, cl):
"""
exception ensures that exc_info is set to True, unless it's already
set.
"""
bl.exception("boom")
        assert [("error", (), {"event": "boom", "exc_info": True})] == cl.calls
def test_exception_passed(self, bl, cl):
"""
        if exc_info already has a value, exception doesn't tamper with it.
"""
bl.exception("boom", exc_info=42)
        assert [("error", (), {"event": "boom", "exc_info": 42})] == cl.calls
@pytest.mark.parametrize("level", tuple(_LEVEL_TO_NAME.keys()))
def test_pickle(self, level):
"""
FilteringBoundLogger are pickleable.
"""
bl = make_filtering_bound_logger(level)
assert bl == pickle.loads(pickle.dumps(bl))
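# Illustrative sketch mirroring the fixtures above, outside of pytest: build a
# filtering bound logger around a CapturingLogger and observe that entries
# below the configured level are dropped before reaching the wrapped logger.
def _filtering_sketch():
    cl = CapturingLogger()
    bl = make_filtering_bound_logger(logging.INFO)(cl, [], {})
    bl.debug("dropped")
    bl.info("kept")
    return cl.calls  # expected: a single ("info", (), {"event": "kept"}) entry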
|
architect/commands/__init__.py | dimagi/architect | 352 | 11139013 | """
Provides unified interface for all Architect commands. Each command should live
in a separate module and define an "arguments" variable which should contain the
command's arguments and a "run" function which implements the command's behaviour.
"""
import os
import sys
import pkgutil
import argparse
from .. import __version__, orms
from ..exceptions import (
BaseArchitectError,
CommandNotProvidedError,
CommandError,
CommandArgumentError
)
commands = {}
for _, name, __ in pkgutil.iter_modules([os.path.dirname(__file__)]):
commands[name] = {'module': __import__(name, globals(), level=1)}
sys.path.append(os.getcwd())
class ArgumentParser(argparse.ArgumentParser):
def result(self, message):
"""
Prints command execution result in a common format.
:param string message: (required). Message to print.
"""
self._print_message('{0}: result: {1}\n'.format(self.prog, str(message)), sys.stdout)
def error(self, message):
"""
Redefines some of argparse's error messages to be more friendly.
:param string message: (required). Error message to print.
"""
commands_list = commands.keys()
if 'too few arguments' in message:
message = str(CommandNotProvidedError(allowed=commands_list))
elif 'invalid choice' in message and ', '.join(map(repr, commands_list)) in message:
message = str(CommandError(current=message.split("'")[1], allowed=commands_list))
elif 'unrecognized arguments' in message:
command = [cmd for cmd in sys.argv[1:] if not cmd.startswith('-')][0]
command_arguments = []
for args in commands[command]['module'].arguments:
arg = list(args.keys())[0]
command_arguments.append('{0} ({1})'.format(arg[1], arg[0]))
message = str(CommandArgumentError(current=message.split(': ')[1], allowed=command_arguments))
self.exit(2, '{0}: error: {1}\n'.format(self.prog, message[:1].lower() + message[1:]))
def main():
"""
Initialization function for all commands.
"""
parser = ArgumentParser(prog='architect')
parser.add_argument('-v', '--version', action='version', version='Architect {0}'.format(__version__))
subparsers = parser.add_subparsers(title='commands', help='run one of the commands for additional functionality')
for command in commands:
commands[command]['parser'] = subparsers.add_parser(
command,
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=50, width=100))
for argument in commands[command]['module'].arguments:
for names, options in argument.items():
commands[command]['parser'].add_argument(*names, **options)
commands[command]['parser'].set_defaults(func=commands[command]['module'].run)
args = parser.parse_args()
# Starting from Python 3.3 the check for empty arguments was removed
# from argparse for some strange reason, so we have to emulate it here
try:
command = args.func.__module__.split('.')[-1]
except AttributeError:
parser.error('too few arguments')
else:
orms.init()
try:
commands[command]['parser'].result(args.func(vars(args)))
except BaseArchitectError as e:
commands[command]['parser'].error(str(e))
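# Illustrative sketch of the command-module protocol described in the module
# docstring above. A hypothetical architect/commands/example.py (the name and
# options below are assumptions, not an existing command) would look like:
#
#     arguments = [
#         {('-n', '--name'): {'help': 'name to greet', 'required': True}},
#     ]
#
#     def run(args):
#         return 'hello, {0}'.format(args['name'])
#
# The package then discovers it via pkgutil, registers its arguments with
# argparse and routes "architect example -n world" to its run() function.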
|
ultimatepython/data_structures/list.py | Mu-L/ultimate-python | 3,678 | 11139038 | """
Lists are a sequence of values that can be modified at runtime. This
module shows how lists are created, iterated, accessed, extended
and shortened.
"""
def main():
# This is a list of strings where
# "a" is a string at index 0 and
# "e" is a string at index 4
letters = ["a", "b", "c", "d", "e"]
assert letters[0] == "a"
assert letters[4] == letters[-1] == "e"
for letter in letters:
# Each of the strings is one character
assert len(letter) == 1
# Each of the strings is a letter
assert letter.isalpha()
# We can get a subset of letters with range slices
assert letters[1:] == ["b", "c", "d", "e"]
assert letters[:-1] == ["a", "b", "c", "d"]
assert letters[1:-2] == ["b", "c"]
assert letters[0:3:2] == ["a", "c"]
assert letters[::2] == ["a", "c", "e"]
assert letters[::-2] == ["e", "c", "a"]
assert letters[::-1] == ["e", "d", "c", "b", "a"]
# This is a list of integers where
# 1 is an integer at index 0 and
# 5 is an integer at index 4
numbers = [1, 2, 3, 4, 5]
assert numbers[0] == 1
assert numbers[4] == numbers[-1] == 5
# Note that a list is ordered and mutable. If we want to reverse the order
# of the `numbers` list, we can start at index 0 and end halfway. At each
# step of the `for` loop, we swap a value from the first half of the list
# with a value from the second half of the list
for ix_front in range(len(numbers) // 2):
ix_back = len(numbers) - ix_front - 1
numbers[ix_front], numbers[ix_back] = numbers[ix_back], numbers[ix_front]
# Let's check that `numbers` is in reverse order
assert numbers == [5, 4, 3, 2, 1]
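    # To make the swap pattern above concrete: with five elements the loop body
    # runs twice, pairing indices (0, 4) and (1, 3), while the middle element
    # at index 2 is left untouched
    swapped_pairs = [(ix, len(numbers) - ix - 1) for ix in range(len(numbers) // 2)]
    assert swapped_pairs == [(0, 4), (1, 3)]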
# Suppose that we want to go back to the original order, we can use the
# builtin `reverse` method in lists
numbers.reverse()
# Let's check that `numbers` is in the original order
assert numbers == [1, 2, 3, 4, 5]
# Print letters and numbers side-by-side using the `zip` function. Notice
# that we pair the letter at index 0 with the number at index 0, and
# do the same for the remaining indices. To see the indices and values
# of a list at the same time, we can use `enumerate` to transform the
# list of values into an iterator of index-value pairs
for index, (letter, number) in enumerate(zip(letters, numbers)):
assert letters[index] == letter
assert numbers[index] == number
# The `for` loop worked because the lengths of both lists are equal
assert len(letters) == len(numbers)
# Lists can be nested at arbitrary levels
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert matrix[0][0] == 1
assert matrix[0][1] == 2
assert matrix[1][0] == 4
assert matrix[1][1] == 5
# This matrix just so happens to be a square so that the length of each
# row is the same as the number of rows in the matrix
for row in matrix:
assert len(matrix) == len(row)
# Notice that lists have variable length and can be modified to have
# more elements. Lists can also be modified to have less elements
lengthy = []
for i in range(5):
lengthy.append(i) # add 0..4 to the back
assert lengthy == [0, 1, 2, 3, 4]
lengthy.pop() # pop out the 4 from the back
assert lengthy == [0, 1, 2, 3]
if __name__ == "__main__":
main()
|
foreman/data_refinery_foreman/foreman/management/commands/assoc_experiment_results.py | AlexsLemonade/refinebio | 106 | 11139070 | """This command will go through ComputationalResult objects that are a
result of the tximport Processor and associate them with the
experiment that tximport was run on. It's purpose is to populate
missing links in our data model, however once it does so there are
additional PRs planned which will break an assumption that this
command makes, so it should be removed once it has served its purpose.
The assumption this command is relying on is:
* tximport has only been run on full experiments.
"""
from django.core.management.base import BaseCommand
from data_refinery_common.models import *
def make_experiment_result_associations():
"""This function performs the function explained at the head of this file.
It does so by following this general strategy:
1. Get tximport results by querying based on the processor
2. Then get one associated sample
3. Then go through that sample's experiments until an experiment is found
that has all of its samples associated with that result.
4. Then make an association with that result."""
# There are multiple "Processor" objects for tximport because
# we create a new one for each version. However we don't care
# which version was used, we just all tximport results.
tximport_processors = Processor.objects.filter(name="Tximport").all()
for tximport_processor in tximport_processors:
results = ComputationalResult.objects.filter(processor=tximport_processor)
for result in results:
result_sample = result.samples.first()
for experiment in result_sample.experiments.all():
experiment_samples = experiment.samples.all()
num_result_associations = 0
for experiment_sample in experiment_samples:
try:
SampleResultAssociation.objects.get(sample=experiment_sample, result=result)
# If we've made it here, then the association exists so count it!
num_result_associations += 1
except Exception:
# If we've made it here, then the
# association doesn't exist so this isn't
# the experiment that the result is for.
break
if num_result_associations == len(experiment_samples):
# Every sample in the experiment is associated
# with this ComputationalResult, so we can
# safely say the experiment is associated with
# it and make that relationship explicit.
ExperimentResultAssociation.objects.get_or_create(
experiment=experiment, result=result
)
class Command(BaseCommand):
def handle(self, *args, **options):
"""This is just the entrypoint for this management command.
All of its work is done in a separate function because that
makes it much easier to test."""
make_experiment_result_associations()
|
esmvaltool/diag_scripts/hydrology/derive_evspsblpot.py | cffbots/ESMValTool | 148 | 11139104 | """Derive Potential Evapotranspiration (evspsblpot) using De Bruin (2016).
<NAME>., <NAME>., <NAME>., <NAME>.: A
Thermodynamically Based Model for Actual Evapotranspiration of an Extensive
Grass Field Close to FAO Reference, Suitable for Remote Sensing Application,
American Meteorological Society, 17, 1373-1382, DOI: 10.1175/JHM-D-15-0006.1,
2016.
"""
import numpy as np
import iris
def tetens_derivative(tas):
"""Compute the derivative of Teten's formula for saturated vapor pressure.
Tetens formula (https://en.wikipedia.org/wiki/Tetens_equation) :=
es(T) = e0 * exp(a * T / (T + b))
Derivate (checked with Wolfram alpha)
des / dT = a * b * e0 * exp(a * T / (b + T)) / (b + T)^2
"""
# Ensure temperature is in degC
tas.convert_units('degC')
# Saturated vapour pressure at 273 Kelvin
e0_const = iris.coords.AuxCoord(np.float32(6.112),
long_name='Saturated vapour pressure',
units='hPa')
emp_a = np.float32(17.67) # empirical constant a
# Empirical constant b in Tetens formula
emp_b = iris.coords.AuxCoord(np.float32(243.5),
long_name='Empirical constant b',
units='degC')
exponent = iris.analysis.maths.exp(emp_a * tas / (tas + emp_b))
return (exponent * e0_const / (tas + emp_b)**2) * (emp_a * emp_b)
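# Quick numerical sanity check of the derivative above (illustrative; plain
# numpy scalars rather than iris cubes): the analytic expression at a sample
# temperature should match a central finite difference of Tetens' formula.
# The chosen temperature of 20 degC is arbitrary.
def _tetens_derivative_check(t_degc=20.0, eps=1e-4):
    e0, emp_a, emp_b = 6.112, 17.67, 243.5
    es = lambda t: e0 * np.exp(emp_a * t / (t + emp_b))
    analytic = emp_a * emp_b * e0 * np.exp(emp_a * t_degc / (emp_b + t_degc)) / (emp_b + t_degc) ** 2
    numeric = (es(t_degc + eps) - es(t_degc - eps)) / (2 * eps)
    return analytic, numeric  # both close to 1.45 hPa K-1 at 20 degC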
def get_constants(psl):
"""Define constants to compute De Bruin (2016) reference evaporation.
The Definition of rv and rd constants is provided in
Wallace and Hobbs (2006), 2.6 equation 3.14.
The Definition of lambda and cp is provided in Wallace and Hobbs 2006.
The Definition of beta and cs is provided in De Bruin (2016), section 4a.
"""
# Ensure psl is in hPa
psl.convert_units('hPa')
# Definition of constants
# source='Wallace and Hobbs (2006), 2.6 equation 3.14',
rv_const = iris.coords.AuxCoord(np.float32(461.51),
long_name='Gas constant water vapour',
units='J K-1 kg-1')
# source='Wallace and Hobbs (2006), 2.6 equation 3.14',
rd_const = iris.coords.AuxCoord(np.float32(287.0),
long_name='Gas constant dry air',
units='J K-1 kg-1')
# Latent heat of vaporization in J kg-1 (or J m-2 day-1)
# source='Wallace and Hobbs 2006'
lambda_ = iris.coords.AuxCoord(np.float32(2.5e6),
long_name='Latent heat of vaporization',
units='J kg-1')
# Specific heat of dry air constant pressure
# source='Wallace and Hobbs 2006',
cp_const = iris.coords.AuxCoord(np.float32(1004),
long_name='Specific heat of dry air',
units='J K-1 kg-1')
# source='De Bruin (2016), section 4a',
beta = iris.coords.AuxCoord(np.float32(20),
long_name='Correction Constant',
units='W m-2')
# source = 'De Bruin (2016), section 4a',
cs_const = iris.coords.AuxCoord(np.float32(110),
long_name='Empirical constant',
units='W m-2')
# source = De Bruin (10.1175/JHM-D-15-0006.1), page 1376
# gamma = (rv/rd) * (cp*msl/lambda_)
# iris.exceptions.NotYetImplementedError: coord / coord
rv_rd_const = rv_const.points[0] / rd_const.points[0]
gamma = rv_rd_const * (psl * cp_const / lambda_)
return gamma, cs_const, beta, lambda_
def debruin_pet(psl, rsds, rsdt, tas):
"""Compute De Bruin (2016) reference evaporation.
Implement equation 6 from De Bruin (10.1175/JHM-D-15-0006.1)
"""
# Variable derivation
delta_svp = tetens_derivative(tas)
gamma, cs_const, beta, lambda_ = get_constants(psl)
# the definition of the radiation components according to the paper:
kdown = rsds
kdown_ext = rsdt
# Equation 5
rad_factor = np.float32(1 - 0.23)
net_radiation = (rad_factor * kdown) - (kdown * cs_const / kdown_ext)
# Equation 6
# the unit is W m-2
ref_evap = ((delta_svp / (delta_svp + gamma)) * net_radiation) + beta
pet = ref_evap / lambda_
pet.var_name = 'evspsblpot'
pet.standard_name = 'water_potential_evaporation_flux'
pet.long_name = 'Potential Evapotranspiration'
return pet
|
modeller/transformation.py | SunGuo/500lines | 134 | 11139130 | import numpy
def translation(displacement):
t = numpy.identity(4)
t[0, 3] = displacement[0]
t[1, 3] = displacement[1]
t[2, 3] = displacement[2]
return t
def scaling(scale):
s = numpy.identity(4)
s[0, 0] = scale[0]
s[1, 1] = scale[1]
s[2, 2] = scale[2]
s[3, 3] = 1
return s
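# Illustrative usage sketch (not part of the original module): these 4x4
# homogeneous matrices act on column vectors [x, y, z, 1], so a transform is
# applied as matrix.dot(point) and composition reads right to left. The
# numbers below are arbitrary.
if __name__ == '__main__':
    point = numpy.array([1.0, 2.0, 3.0, 1.0])
    moved = translation([10.0, 0.0, 0.0]).dot(point)  # -> [11., 2., 3., 1.]
    scaled_then_moved = translation([10.0, 0.0, 0.0]).dot(scaling([2.0, 2.0, 2.0])).dot(point)
    print(moved, scaled_then_moved)  # second -> [12., 4., 6., 1.]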
|
slack_bolt/context/respond/async_respond.py | hirosassa/bolt-python | 504 | 11139135 | from typing import Optional, Union, Sequence
from slack_sdk.models.attachments import Attachment
from slack_sdk.models.blocks import Block
from slack_sdk.webhook.async_client import AsyncWebhookClient, WebhookResponse
from slack_bolt.context.respond.internals import _build_message
class AsyncRespond:
response_url: Optional[str]
def __init__(self, *, response_url: Optional[str]):
self.response_url: Optional[str] = response_url
async def __call__(
self,
text: Union[str, dict] = "",
blocks: Optional[Sequence[Union[dict, Block]]] = None,
attachments: Optional[Sequence[Union[dict, Attachment]]] = None,
response_type: Optional[str] = None,
replace_original: Optional[bool] = None,
delete_original: Optional[bool] = None,
unfurl_links: Optional[bool] = None,
unfurl_media: Optional[bool] = None,
) -> WebhookResponse:
if self.response_url is not None:
client = AsyncWebhookClient(self.response_url)
text_or_whole_response: Union[str, dict] = text
if isinstance(text_or_whole_response, str):
message = _build_message(
text=text,
blocks=blocks,
attachments=attachments,
response_type=response_type,
replace_original=replace_original,
delete_original=delete_original,
unfurl_links=unfurl_links,
unfurl_media=unfurl_media,
)
return await client.send_dict(message)
elif isinstance(text_or_whole_response, dict):
whole_response: dict = text_or_whole_response
message = _build_message(**whole_response)
return await client.send_dict(message)
else:
raise ValueError(f"The arg is unexpected type ({type(text)})")
else:
raise ValueError("respond is unsupported here as there is no response_url")
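# Hedged usage sketch (illustrative only): in a real Bolt app an AsyncRespond
# instance is handed to listeners as the ``respond`` argument, so constructing
# one by hand as below is purely for demonstration and the response_url is a
# placeholder value.
async def _respond_usage_sketch():
    respond = AsyncRespond(response_url="https://hooks.slack.com/actions/T000/B000/XXXX")
    return await respond(text="Got it!", response_type="ephemeral")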
|
elastalert/alerters/gitter.py | perceptron01/elastalert2 | 250 | 11139193 | import json
import requests
from requests import RequestException
from elastalert.alerts import Alerter, DateTimeEncoder
from elastalert.util import EAException, elastalert_logger
class GitterAlerter(Alerter):
""" Creates a Gitter activity message for each alert """
required_options = frozenset(['gitter_webhook_url'])
def __init__(self, rule):
super(GitterAlerter, self).__init__(rule)
self.gitter_webhook_url = self.rule.get('gitter_webhook_url', None)
self.gitter_proxy = self.rule.get('gitter_proxy', None)
self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')
def alert(self, matches):
body = self.create_alert_body(matches)
# post to Gitter
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None
payload = {
'message': body,
'level': self.gitter_msg_level
}
try:
response = requests.post(self.gitter_webhook_url,
data=json.dumps(payload, cls=DateTimeEncoder),
headers=headers,
proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to Gitter: %s" % e)
elastalert_logger.info("Alert sent to Gitter")
def get_info(self):
return {'type': 'gitter',
'gitter_webhook_url': self.gitter_webhook_url}
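# Example rule snippet (illustrative; option names taken from __init__ above,
# the webhook URL is a placeholder):
#
#     alert:
#       - gitter
#     gitter_webhook_url: "https://webhooks.gitter.im/e/xxxxxxxxxxxx"
#     gitter_msg_level: "error"                       # optional, default 'error'
#     gitter_proxy: "https://proxy.example.com:3128"  # optional https proxy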
|
tdda/referencetest/tests/testregeneration.py | jjlee42/tdda | 232 | 11139199 | # -*- coding: utf-8 -*-
#
# Unit tests for reference data regeneration
#
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import os
import tempfile
import unittest
from tdda.referencetest.referencetest import ReferenceTest
class TestRegenerate(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tmpdir = tempfile.gettempdir()
def setUp(self):
ReferenceTest.set_default_data_location(self.tmpdir)
ReferenceTest.set_defaults(verbose=False)
def tearDown(self):
ReferenceTest.set_regeneration(regenerate=False)
def test_regenerate_all(self):
ReferenceTest.set_regeneration()
ref = ReferenceTest(assert_fn=self.assertTrue)
refname = 'regenerate_all.txt'
reffile = os.path.join(self.tmpdir, refname)
ref.assertStringCorrect('Start\nMiddle\nEnd\n', refname)
with open(reffile) as f:
self.assertEqual(f.read(), 'Start\nMiddle\nEnd\n')
ref.assertStringCorrect('Completely different content', refname)
with open(reffile) as f:
self.assertEqual(f.read(), 'Completely different content')
def test_regenerate_kinds(self):
ReferenceTest.set_regeneration('csv')
ref = ReferenceTest(assert_fn=self.assertTrue)
txtname = 'regenerate.txt'
csvname = 'regenerate.csv'
txtfile = os.path.join(self.tmpdir, txtname)
csvfile = os.path.join(self.tmpdir, csvname)
with self.assertRaises(Exception):
ref.assertStringCorrect('End\nMiddle\nStart\n', txtname,
kind='txt')
ref.assertStringCorrect('Start\nMiddle\nEnd\n', csvname, kind='csv')
with open(csvfile) as f:
self.assertEqual(f.read(), 'Start\nMiddle\nEnd\n')
ref.assertStringCorrect('Completely different content', csvname,
kind='csv')
with open(csvfile) as f:
self.assertEqual(f.read(), 'Completely different content')
if __name__ == '__main__':
unittest.main()
|
sdk/python/pulumi_azure/securitycenter/workspace.py | henriktao/pulumi-azure | 109 | 11139205 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['WorkspaceArgs', 'Workspace']
@pulumi.input_type
class WorkspaceArgs:
def __init__(__self__, *,
scope: pulumi.Input[str],
workspace_id: pulumi.Input[str]):
"""
The set of arguments for constructing a Workspace resource.
:param pulumi.Input[str] scope: The scope of VMs to send their security data to the desired workspace, unless overridden by a setting with more specific scope.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace to save the data in.
"""
pulumi.set(__self__, "scope", scope)
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter
def scope(self) -> pulumi.Input[str]:
"""
The scope of VMs to send their security data to the desired workspace, unless overridden by a setting with more specific scope.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: pulumi.Input[str]):
pulumi.set(self, "scope", value)
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> pulumi.Input[str]:
"""
The ID of the Log Analytics Workspace to save the data in.
"""
return pulumi.get(self, "workspace_id")
@workspace_id.setter
def workspace_id(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_id", value)
@pulumi.input_type
class _WorkspaceState:
def __init__(__self__, *,
scope: Optional[pulumi.Input[str]] = None,
workspace_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Workspace resources.
:param pulumi.Input[str] scope: The scope of VMs to send their security data to the desired workspace, unless overridden by a setting with more specific scope.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace to save the data in.
"""
if scope is not None:
pulumi.set(__self__, "scope", scope)
if workspace_id is not None:
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter
def scope(self) -> Optional[pulumi.Input[str]]:
"""
The scope of VMs to send their security data to the desired workspace, unless overridden by a setting with more specific scope.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scope", value)
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Log Analytics Workspace to save the data in.
"""
return pulumi.get(self, "workspace_id")
@workspace_id.setter
def workspace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_id", value)
class Workspace(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
scope: Optional[pulumi.Input[str]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages the subscription's Security Center Workspace.
> **NOTE:** Owner access permission is required.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018")
example_workspace = azure.securitycenter.Workspace("exampleWorkspace",
scope="/subscriptions/00000000-0000-0000-0000-000000000000",
workspace_id=example_analytics_workspace.id)
```
## Import
The contact can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:securitycenter/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Security/workspaceSettings/default
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] scope: The scope of VMs to send their security data to the desired workspace, unless overridden by a setting with more specific scope.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace to save the data in.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WorkspaceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages the subscription's Security Center Workspace.
> **NOTE:** Owner access permission is required.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018")
example_workspace = azure.securitycenter.Workspace("exampleWorkspace",
scope="/subscriptions/00000000-0000-0000-0000-000000000000",
workspace_id=example_analytics_workspace.id)
```
## Import
The contact can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:securitycenter/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Security/workspaceSettings/default
```
:param str resource_name: The name of the resource.
:param WorkspaceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
scope: Optional[pulumi.Input[str]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WorkspaceArgs.__new__(WorkspaceArgs)
if scope is None and not opts.urn:
raise TypeError("Missing required property 'scope'")
__props__.__dict__["scope"] = scope
if workspace_id is None and not opts.urn:
raise TypeError("Missing required property 'workspace_id'")
__props__.__dict__["workspace_id"] = workspace_id
super(Workspace, __self__).__init__(
'azure:securitycenter/workspace:Workspace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
scope: Optional[pulumi.Input[str]] = None,
workspace_id: Optional[pulumi.Input[str]] = None) -> 'Workspace':
"""
Get an existing Workspace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] scope: The scope of VMs to send their security data to the desired workspace, unless overridden by a setting with more specific scope.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace to save the data in.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _WorkspaceState.__new__(_WorkspaceState)
__props__.__dict__["scope"] = scope
__props__.__dict__["workspace_id"] = workspace_id
return Workspace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def scope(self) -> pulumi.Output[str]:
"""
The scope of VMs to send their security data to the desired workspace, unless overridden by a setting with more specific scope.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> pulumi.Output[str]:
"""
The ID of the Log Analytics Workspace to save the data in.
"""
return pulumi.get(self, "workspace_id")
|
lib/cherrypy/_cpnative_server.py | 0x20Man/Watcher3 | 320 | 11139228 | """Native adapter for serving CherryPy via its builtin server."""
import logging
import sys
import io
import cheroot.server
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
class NativeGateway(cheroot.server.Gateway):
recursive = False
def respond(self):
req = self.req
try:
# Obtain a Request object from CherryPy
local = req.server.bind_addr
local = httputil.Host(local[0], local[1], '')
remote = req.conn.remote_addr, req.conn.remote_port
remote = httputil.Host(remote[0], remote[1], '')
scheme = req.scheme
sn = cherrypy.tree.script_name(req.uri or '/')
if sn is None:
self.send_response('404 Not Found', [], [''])
else:
app = cherrypy.tree.apps[sn]
method = req.method
path = req.path
qs = req.qs or ''
headers = req.inheaders.items()
rfile = req.rfile
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(
local, remote, scheme, 'HTTP/1.1')
request.multithread = True
request.multiprocess = False
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the
# response
try:
request.run(method, path, qs,
req.request_protocol, headers, rfile)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not self.recursive:
if ir.path in redirections:
raise RuntimeError(
'InternalRedirector visited the same '
'URL twice: %r' % ir.path)
else:
# Add the *previous* path_info + qs to
# redirections.
if qs:
qs = '?' + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = 'GET'
path = ir.path
qs = ir.query_string
rfile = io.BytesIO()
self.send_response(
response.output_status, response.header_list,
response.body)
finally:
app.release_serving()
except:
tb = format_exc()
# print tb
cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
s, h, b = bare_error()
self.send_response(s, h, b)
def send_response(self, status, headers, body):
req = self.req
# Set response status
req.status = str(status or '500 Server Error')
# Set response headers
for header, value in headers:
req.outheaders.append((header, value))
if (req.ready and not req.sent_headers):
req.sent_headers = True
req.send_headers()
# Set response body
for seg in body:
req.write(seg)
class CPHTTPServer(cheroot.server.HTTPServer):
"""Wrapper for cheroot.server.HTTPServer.
cheroot has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications.
Therefore, we wrap it here, so we can apply some attributes
from config -> cherrypy.server -> HTTPServer.
"""
def __init__(self, server_adapter=cherrypy.server):
self.server_adapter = server_adapter
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
cheroot.server.HTTPServer.__init__(
self, server_adapter.bind_addr, NativeGateway,
minthreads=server_adapter.thread_pool,
maxthreads=server_adapter.thread_pool_max,
server_name=server_name)
self.max_request_header_size = (
self.server_adapter.max_request_header_size or 0)
self.max_request_body_size = (
self.server_adapter.max_request_body_size or 0)
self.request_queue_size = self.server_adapter.socket_queue_size
self.timeout = self.server_adapter.socket_timeout
self.shutdown_timeout = self.server_adapter.shutdown_timeout
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
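# Usage sketch (an assumption about typical wiring, not part of this module):
# the native server replaces the default WSGI server before the engine
# starts, e.g.
#
#     import cherrypy
#     from cherrypy._cpnative_server import CPHTTPServer
#
#     cherrypy.server.httpserver = CPHTTPServer(cherrypy.server)
#     cherrypy.quickstart(Root())   # Root is the application's root handler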
|
neuralmonkey/trainers/__init__.py | kasnerz/neuralmonkey | 446 | 11139243 | from .cross_entropy_trainer import CrossEntropyTrainer
from .delayed_update_trainer import DelayedUpdateTrainer
from .multitask_trainer import MultitaskTrainer
|
deepsnap/dataset.py | ruth-ann/deepsnap | 412 | 11139265 | import copy
import math
import types
import random
import networkx as nx
import numpy as np
import torch
import deepsnap
from deepsnap.graph import Graph
from deepsnap.hetero_graph import HeteroGraph
import pdb
from typing import (
Dict,
List,
Union
)
import warnings
class Generator(object):
r"""
Abstract class of on the fly generator used in the dataset.
It generates on the fly graphs, which will be fed into the model.
"""
def __init__(self, sizes, size_prob=None, dataset_len=0):
self.sizes = sizes
if sizes is not None:
if size_prob is None:
self.size_prob = np.ones(len(sizes)) / len(sizes)
else:
self.size_prob = size_prob
# by default length of generator dataset is 0
self._len = dataset_len
def __len__(self):
return self._len
def set_len(self, dataset_len):
self._len = dataset_len
def _get_size(self, size=None):
if size is None:
return np.random.choice(
self.sizes, size=1, replace=True, p=self.size_prob
)[0]
else:
return size
@property
def num_node_labels(self):
return 0
@property
def num_nodes(self):
return 0
@property
def num_edge_labels(self):
return 0
@property
def num_edges(self):
return 0
@property
def num_graph_labels(self):
return 0
def generate(self):
r"""
Overwrite in subclass. Generates and returns a
:class:`deepsnap.graph.Graph` object
Returns:
:class:`deepsnap.graph.Graph`: A DeepSNAP graph object.
"""
return Graph(nx.Graph())
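# A minimal subclass sketch (illustrative; it assumes the deepsnap convention
# of storing features in the "node_feature" node attribute):
#
#     class ERGenerator(Generator):
#         def generate(self):
#             num_nodes = self._get_size()
#             G = nx.gnp_random_graph(num_nodes, p=0.1)
#             for node in G.nodes:
#                 G.nodes[node]["node_feature"] = torch.ones(1)
#             return Graph(G)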
class EnsembleGenerator(Generator):
def __init__(self, generators, gen_prob=None, dataset_len=0):
r"""
A generator that is an ensemble of many generators.
Args:
generators (List[:class:`Generator`]): A list of Generators.
gen_prob (Array like): An array like (list) probabilities with
the same length as generators. It specifies the probability
of sampling from each generator. If it is `None`, the
:class:`EnsembleGenerator` will uniformly sample a generator.
"""
super(EnsembleGenerator, self).__init__(None, dataset_len=dataset_len)
if gen_prob is None:
self.gen_prob = np.ones(len(generators)) / len(generators)
else:
self.gen_prob = gen_prob
self.generators = generators
@property
def num_node_labels(self):
r"""
Returns number of the node labels in the generated graphs.
Returns:
int: The number of node labels.
"""
        # TODO: count unique labels, as done in graph.py
return max([gen.num_node_labels for gen in self.generators])
@property
def num_nodes(self):
r"""
Returns number of the nodes in each generated graphs.
Returns:
list: List of the number of nodes.
"""
return [gen.num_nodes for gen in self.generators]
@property
def num_edge_labels(self):
r"""
Returns number of the edge labels in the generated graphs.
Returns:
int: The number of edge labels.
"""
        # TODO: count unique labels, as done in graph.py
return max([gen.num_edge_labels for gen in self.generators])
@property
def num_edges(self):
r"""
Returns number of the edges in each generated graphs.
Returns:
list: List of the number of edges.
"""
return [gen.num_edges for gen in self.generators]
@property
def num_graph_labels(self):
r"""
Returns number of the graph labels in the generated graphs.
Returns:
int: The number of graph labels.
"""
        # TODO: count unique labels, as done in graph.py
return max([gen.num_graph_labels for gen in self.generators])
def generate(self, **kwargs):
r"""
Generate a list of graphs.
Returns:
list: Generated a list of :class:`deepsnap.graph.Graph` objects.
"""
gen = np.random.choice(self.generators, 1, p=self.gen_prob)[0]
return gen.generate(**kwargs)
class GraphDataset(object):
r"""
A plain python object modeling a list of :class:`deepsnap.graph.Graph`
objects with various (optional) attributes.
Args:
graphs (list, optional): A list of :class:`deepsnap.graph.Graph`.
task (str): The task that this :class:`GraphDataset` is used for
(task = `node` or `edge` or `link_pred` or `graph`).
custom_split_graphs (list): A list of 2 (train and val)
or 3 (train, val and test) lists of splitted graphs, used in
custom split of the `graph` task.
edge_negative_sampling_ratio (float): The number of negative
samples compared to that of positive edges. Default value
is 1.
edge_message_ratio (float): The number of message passing edges
compared to that of training supervision edges. Default value
is 0.8.
edge_train_mode (str): Use `all` or `disjoint`. In `all`
mode, training supervision edges are same with the message
passing edges. In `disjoint` mode, training supervision
objectives are different from the message passing edges.
The difference between these two modes please see
the `DeepSNAP link prediction Colab <https://colab.research.
google.com/drive/1ycdlJuse7l2De7wi51lFd_nCuaWgVABc?
usp=sharing>`_.
edge_split_mode (str): Use `exact` or `approximate`. This mode is
designed for the heterogeneous graph. If the mode is `exact`,
split the heterogeneous graph according to both the ratio
and the split type. If the mode is `approximate`, split the
heterogeneous graph regardless of the split type.
minimum_node_per_graph (int): If the number of nodes of a graph
is smaller than the minimum node per graph, that graph will
be filtered out.
generator (:class:`Generator`): The dataset will be on-the-fly
generated. The on-the-fly generator will be used, if the
:obj:`self.graphs` is empty or `None`, and the generator
(:class:`Generator`) is provided with an overwritten
:meth:`generate` method.
resample_negatives (bool): Whether to resample negative edges in
each iteration of the `link_pred` task. User needs to set this
variable in the case of tensor backend for the custom split.
resample_disjoint (bool): Whether to resample disjoint training
            edges in the `disjoint` `link_pred` task.
resample_disjoint_period (int): The number of iterations after
which the training edges in the `disjoint` mode are resampled.
negative_label_val (int, optional): The value of negative edges
generated in link_pred task. User needs to set this variable
in the case of tensor backend custom split.
netlib (types.ModuleType, optional): The graph backend module.
Currently DeepSNAP supports the NetworkX and SnapX (for
SnapX only the undirected homogeneous graph) as the graph
backend. Default graph backend is the NetworkX.
"""
def __init__(
self,
graphs: List[Graph] = None,
task: str = "node",
custom_split_graphs: List[Graph] = None,
edge_negative_sampling_ratio: float = 1,
edge_message_ratio: float = 0.8,
edge_train_mode: str = "all",
edge_split_mode: str = "exact",
minimum_node_per_graph: int = 5,
generator=None,
resample_negatives: bool = False,
resample_disjoint: bool = False,
resample_disjoint_period: int = 1,
negative_label_val: int = None,
netlib=None
):
if netlib is not None:
deepsnap._netlib = netlib
if graphs is not None:
# make sure graphs is a list
if not isinstance(graphs, list):
graphs = [graphs]
            # support users passing a list of netlib.Graph instead of Graph
for i, graph in enumerate(graphs):
if not isinstance(graph, Graph):
graphs[i] = Graph(graph, netlib=netlib)
# validity check for `task`
if task not in ["node", "edge", "link_pred", "graph"]:
raise ValueError(
"task must be one of node, edge, link_pred or graph."
)
# validity check for `edge_train_mode`
if edge_train_mode not in ["all", "disjoint"]:
raise ValueError("edge_train_mode must be all or disjoint.")
# validity check for `edge_split_mode`
if edge_split_mode not in ["exact", "approximate"]:
raise ValueError(
"edge_split_mode must be exact or approximate."
)
# validity check for `resample_negatives`
if resample_negatives and (task != "link_pred"):
raise ValueError(
"resample_negatives set to True only make sense "
"when self.task is link_pred."
)
# validity check for `resample_disjoint`
# if resample_negatives and (self.task != "link_pred"):
if (
resample_disjoint
and (
(task != "link_pred")
or (edge_train_mode != "disjoint")
)
):
raise ValueError(
"resample_disjoint set to True only make sense "
"when self.task is `link_pred` and edge_train_mode is "
"disjoint."
)
        # validity check for `negative_label_val`
if (negative_label_val is not None) and (task != "link_pred"):
raise ValueError(
"negative_label_val is set only make sense "
"when self.task is link_pred."
)
# parameter initialization
self.graphs = graphs
self.task = task
self.custom_split_graphs = custom_split_graphs
self.edge_message_ratio = edge_message_ratio
self.edge_negative_sampling_ratio = edge_negative_sampling_ratio
self.edge_train_mode = edge_train_mode
self.edge_split_mode = edge_split_mode
self.minimum_node_per_graph = minimum_node_per_graph
self.resample_negatives = resample_negatives
self.resample_disjoint = resample_disjoint
self.resample_disjoint_period = resample_disjoint_period
self.negative_label_val = negative_label_val
# set private parameters
self._split_types = None
self._is_tensor = False
# graphs preprocessing
if graphs is None or len(graphs) == 0:
if generator is None:
raise ValueError("Graphs are None")
else:
# on-the-fly dataset
self.generator = generator
self.graphs = None
self.otf_device = None
elif generator is not None:
raise ValueError(
"Both graphs and on-the-fly generator are "
"provided (only one should be provided."
)
else:
# by default on-the-fly generator is not used
            # when generator is not provided
self.generator = None
# filter graphs that are too small
if self.minimum_node_per_graph > 0:
graphs_filter = []
for idx, graph in enumerate(self.graphs):
if isinstance(graph, Graph):
if isinstance(graph, HeteroGraph):
if (
sum(graph.num_nodes().values())
>= self.minimum_node_per_graph
):
graphs_filter.append(graph)
else:
warnings.warn(
f"the {idx}-th graph in self.graphs is "
"filtered out as it contains "
f"{sum(graph.num_nodes().values())} nodes,"
" which is less than "
"self.minimum_node_per_graph: "
f"{self.minimum_node_per_graph}."
)
else:
if graph.num_nodes >= self.minimum_node_per_graph:
graphs_filter.append(graph)
else:
warnings.warn(
f"the {idx}-th graph in self.graphs is "
"filtered out as it contains "
f"{graph.num_nodes} nodes,"
" which is less than "
"self.minimum_node_per_graph: "
f"{self.minimum_node_per_graph}."
)
else:
raise TypeError(
"element in self.graphs of unexpected type"
)
self.graphs = graphs_filter
# update graph in self.graphs with appropriate custom
# split component
for graph in self.graphs:
if not hasattr(graph, "_custom_update_flag"):
# assign task to graph
graph.task = self.task
# custom support
if isinstance(graph, Graph):
if isinstance(graph, HeteroGraph):
mapping = {
x: x
for x in range(sum(graph.num_nodes().values()))
}
else:
mapping = {x: x for x in range(graph.num_nodes)}
else:
raise TypeError(
"element in self.graphs of unexpected type"
)
graph._custom_update(mapping)
# update graph in self.graphs with negative_label_val
if self.task == "link_pred":
if self.negative_label_val is None:
negative_label_val = 0
for graph in self.graphs:
if (
hasattr(graph, "edge_label")
and (graph.edge_label is not None)
and (self.task == "link_pred")
):
if isinstance(graph, Graph):
if isinstance(graph, HeteroGraph):
for message_type in graph.edge_label:
negative_label_val = max(
negative_label_val,
torch.max(
graph.edge_label[message_type]
).item() + 1
)
else:
negative_label_val = max(
negative_label_val,
torch.max(graph.edge_label).item() + 1
)
else:
raise TypeError(
"element in self.graphs of unexpected type"
)
self.negative_label_val = negative_label_val
for graph in self.graphs:
graph.negative_label_val = (
copy.deepcopy(negative_label_val)
)
self._update_tensor_negative_edges()
self._custom_mode_update()
self._reset_cache()
def _update_tensor_negative_edges(self):
r"""
Create negative edges and labels for tensor backend link_pred case.
"""
if self.task != "link_pred":
return
if not all([graph.G is None for graph in self.graphs]):
return
any_negative_edges = any(
["negative_edge" in graph.keys for graph in self.graphs]
)
all_negative_edges = all(
["negative_edge" in graph.keys for graph in self.graphs]
)
if (not all_negative_edges) and any_negative_edges:
raise ValueError(
"either all graphs have negative edges or no graphs have "
"negative edges."
)
else:
self._is_tensor = True
for graph in self.graphs:
graph._edge_label = copy.deepcopy(graph.edge_label)
graph._edge_label_index = copy.deepcopy(graph.edge_label_index)
if all_negative_edges:
graph._custom_create_neg_sampling(
self.edge_negative_sampling_ratio, resample=False
)
else:
graph._create_neg_sampling(
self.edge_negative_sampling_ratio, resample=False
)
def __len__(self) -> int:
r"""
Returns:
int: The number of graph in graphs.
"""
if self.graphs is None:
return len(self.generator)
else:
return len(self.graphs)
@property
def num_node_features(self) -> int:
r"""
Returns the node feature dimension.
Returns:
int: The node feature dimension for the graphs
in the dataset.
"""
return self._graph_example.num_node_features
@property
def num_node_labels(self) -> int:
r"""
Returns the number of node labels.
Returns:
int: The number of node labels for the graphs
in the dataset.
"""
if self._num_node_labels is None:
if self.graphs is None:
self._num_node_labels = self.generator.num_node_labels
else:
unique_node_labels = torch.LongTensor([])
for graph in self.graphs:
unique_node_labels = torch.cat([
unique_node_labels, graph.get_num_labels("node_label")
])
self._num_node_labels = torch.unique(
unique_node_labels
).shape[0]
return self._num_node_labels
@property
def num_nodes(self) -> List[int]:
r"""
Return the number of nodes for the graphs in the dataset.
Returns:
list: A list of number of nodes for the graphs
in the dataset.
"""
if self._num_nodes is None:
if self.graphs is None:
self._num_nodes = self.generator.num_nodes
else:
self._num_nodes = (
[graph.num_nodes for graph in self.graphs]
)
return self._num_nodes
@property
def num_edge_features(self) -> int:
r"""
Returns the edge feature dimension.
Returns:
int: The edge feature dimension for the graphs
in the dataset.
"""
return self._graph_example.num_edge_features
@property
def num_edge_labels(self) -> int:
r"""
Returns the number of edge labels.
Returns:
int: The number of edge labels for the graphs
in the dataset.
"""
if self._num_edge_labels is None:
if self.graphs is None:
self._num_edge_labels = self.generator.num_edge_labels
else:
unique_edge_labels = torch.LongTensor([])
for graph in self.graphs:
unique_edge_labels = torch.cat([
unique_edge_labels, graph.get_num_labels("edge_label")
])
self._num_edge_labels = torch.unique(
unique_edge_labels
).shape[0]
return self._num_edge_labels
@property
def num_edges(self) -> List[int]:
r"""
Return the number of edges for the graphs in the dataset.
Returns:
list: A list of number of edges for the graphs
in the dataset.
"""
if self._num_edges is None:
if self.graphs is None:
self._num_edges = self.generator.num_edges
else:
self._num_edges = (
[graph.num_edges for graph in self.graphs]
)
return self._num_edges
@property
def num_graph_features(self) -> int:
r"""
Returns the graph feature dimension.
Returns:
int: The graph feature dimension for the graphs
in the dataset.
"""
return self._graph_example.num_graph_features
@property
def num_graph_labels(self) -> int:
r"""
Returns the number of graph labels.
Returns:
int: The number of graph labels for the graphs
in the dataset.
"""
if self._num_graph_labels is None:
if self.graphs is None:
self._num_graph_labels = self.generator.num_graph_labels
else:
unique_graph_labels = torch.LongTensor([])
for graph in self.graphs:
unique_graph_labels = torch.cat([
unique_graph_labels,
graph.get_num_labels("graph_label")
])
self._num_graph_labels = torch.unique(
unique_graph_labels
).shape[0]
return self._num_graph_labels
@property
def num_labels(self) -> int:
r"""
A General wrapper that returns the number of labels depending on
the task.
Returns:
int: The number of labels, depending on the task.
"""
if self.task == "node":
return self.num_node_labels
elif self.task == "edge" or self.task == "link_pred":
return self.num_edge_labels
elif self.task == "graph":
return self.num_graph_labels
else:
raise ValueError(f"Task {self.task} not supported")
def num_dims_dict(self) -> Dict[str, int]:
r"""
Dimensions of all fields.
Returns:
dict: Dimensions of all fields. For example, if
graphs have two attributes the `node_feature`
and the `graph_label`. The returned dictionary will
have two keys, `node_feature` and `graph_label`, and
two values, node feature dimension and graph label
dimension.
"""
dim_dict = {}
for key in self._graph_example.keys:
tensor = self._graph_example[key]
if not torch.is_tensor(tensor):
continue
if tensor.ndim == 1:
dim_dict[key] = 1
elif tensor.ndim == 2:
dim_dict[key] = tensor.size()[-1]
else:
raise ValueError(f"Dimension of tensor {key} exceeds 2.")
return dim_dict
def _custom_mode_update(self):
r"""
Update self.general_splits_mode, self.disjoint_split_mode &
self.negative_edges_mode to indicate whether we are working on custom
support for:
(1) general transductive or inductive custom split
or (2) disjoint train custom split in disjoint link_pred task
or (3) custom negative edges in link_pred task
"""
custom_keys = ["general_splits", "disjoint_split", "negative_edges"]
for custom_key in custom_keys:
self[f"{custom_key}_mode"] = "random"
# make sure custom appeared in all graphs or no custom appeared
# in any graphs
custom_in_graphs = all(
(graph.custom is not None) for graph in self.graphs
)
custom_not_in_graphs = all(
(graph.custom is None) for graph in self.graphs
)
if not custom_in_graphs and not custom_not_in_graphs:
raise ValueError(
"custom needs to be in all graphs or not in any graphs"
)
if custom_in_graphs:
for custom_key in custom_keys:
custom_key_in_custom = all(
(custom_key in graph.custom)
for graph in self.graphs
)
custom_key_not_in_custom = all(
(custom_key not in graph.custom) for graph in self.graphs
)
if not custom_key_in_custom and not custom_key_not_in_custom:
raise ValueError(
f"{custom_key} needs to be in all `graph.custom`s or "
"not in any `graph.custom`s"
)
if custom_key_in_custom:
self[f"{custom_key}_mode"] = "custom"
# custom inductive splits
if self.custom_split_graphs is not None:
self.general_splits_mode = "custom"
def _split_transductive(
self,
split_ratio: List[float],
split_types: List[str] = None,
shuffle: bool = True
) -> List[Graph]:
r"""
Split the dataset assuming training process is transductive.
Args:
split_ratio: number of data splitted into train, validation
(and test) set.
Returns:
list: A list of 3 (2) lists of :class:`deepsnap.graph.Graph`
object corresponding to train, validation (and test) set.
"""
split_graphs = []
for graph in self.graphs:
if self.general_splits_mode == "custom":
split_graph = graph._custom_split(
task=self.task
)
elif self.general_splits_mode == "random":
if isinstance(graph, Graph):
if isinstance(graph, HeteroGraph):
split_graph = graph.split(
task=self.task,
split_types=split_types,
split_ratio=split_ratio,
edge_split_mode=self.edge_split_mode,
shuffle=shuffle
)
else:
split_graph = graph.split(
self.task, split_ratio, shuffle=shuffle
)
else:
raise TypeError(
"element in self.graphs of unexpected type"
)
split_graphs.append(split_graph)
split_graphs = list(map(list, zip(*split_graphs)))
if self.disjoint_split_mode == "custom":
# resample_disjoint when in disjoint split custom mode
# would override the custom disjoint split edges
self.resample_disjoint = False
for i, graph in enumerate(split_graphs[0]):
if (
self.task == "link_pred"
and self.edge_train_mode == "disjoint"
):
graph = graph._custom_split_link_pred_disjoint()
split_graphs[0][i] = graph
elif self.disjoint_split_mode == "random":
for i, graph in enumerate(split_graphs[0]):
if (
self.task == "link_pred"
and self.edge_train_mode == "disjoint"
):
if isinstance(graph, Graph):
# store the original edge_label
graph_edge_label = None
if (
self.resample_disjoint
and hasattr(graph, "edge_label")
):
graph_edge_label = graph.edge_label
if isinstance(graph, HeteroGraph):
graph = graph.split_link_pred(
split_types=split_types,
split_ratio=self.edge_message_ratio,
edge_split_mode=self.edge_split_mode
)[1]
else:
graph = graph.split_link_pred(
self.edge_message_ratio
)[1]
graph._is_train = True
split_graphs[0][i] = graph
# save the original edge_label
if graph_edge_label is not None:
graph._edge_label = copy.deepcopy(graph_edge_label)
else:
graph._edge_label = None
else:
raise TypeError(
"element in self.graphs of unexpected type"
)
# list of num_splits datasets
# (e.g. [train dataset, val dataset, test dataset])
dataset_return = []
if self.negative_edges_mode == "random":
for x in split_graphs:
dataset_current = copy.copy(self)
dataset_current.graphs = x
if self.task == "link_pred":
for graph_temp in dataset_current.graphs:
if isinstance(graph_temp, Graph):
if isinstance(graph_temp, HeteroGraph):
graph_temp._create_neg_sampling(
negative_sampling_ratio=(
self.edge_negative_sampling_ratio
),
split_types=split_types
)
else:
graph_temp._create_neg_sampling(
self.edge_negative_sampling_ratio
)
else:
raise TypeError(
"element in self.graphs of unexpected type"
)
dataset_return.append(dataset_current)
elif self.negative_edges_mode == "custom":
for i, x in enumerate(split_graphs):
dataset_current = copy.copy(self)
dataset_current.graphs = x
if self.task == "link_pred":
for j, graph_temp in enumerate(dataset_current.graphs):
if isinstance(graph_temp, Graph):
graph_temp.negative_edge = (
graph_temp.negative_edges[i]
)
if isinstance(graph_temp, HeteroGraph):
graph_temp._custom_create_neg_sampling(
self.edge_negative_sampling_ratio,
split_types=split_types
)
else:
graph_temp._custom_create_neg_sampling(
self.edge_negative_sampling_ratio
)
else:
raise TypeError(
"element in self.graphs of unexpected type"
)
dataset_return.append(dataset_current)
# resample negatives for train split (only for link prediction)
dataset_return[0].resample_negatives = True
return dataset_return
def _split_inductive(
self,
split_ratio: List[float],
split_types: List[str] = None,
shuffle: bool = True
) -> List[Graph]:
r"""
Split the dataset assuming training process is inductive.
Args:
split_ratio: number of data splitted into train, validation
(and test) set.
Returns:
List[Graph]: a list of 3 (2) lists of graph object corresponding
to train, validation (and test) set.
"""
if self.general_splits_mode == "custom":
split_graphs = self.custom_split_graphs
elif self.general_splits_mode == "random":
num_graphs = len(self.graphs)
if num_graphs < len(split_ratio):
raise ValueError(
"in _split_inductive num of graphs are smaller than the "
"number of splitted parts"
)
if shuffle:
self._shuffle()
# a list of num_splits list of graphs
# (e.g. [train graphs, val graphs, test graphs])
split_graphs = []
            # If the `default split` policy would result in empty split
            # graphs, the `secure split` policy is used instead
split_empty_flag = False
split_offset = 0
# perform `default split`
for i, split_ratio_i in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = int(split_ratio_i * num_graphs)
graphs_split_i = (
self.graphs[split_offset:split_offset + num_split_i]
)
split_offset += num_split_i
else:
graphs_split_i = self.graphs[split_offset:]
if len(graphs_split_i) == 0:
split_empty_flag = True
split_offset = 0
split_graphs = []
break
split_graphs.append(graphs_split_i)
if split_empty_flag:
                # perform `secure split` so that every split graph
                # list contains at least one graph.
for i, split_ratio_i in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = (
1 +
int(
split_ratio_i
* (num_graphs - len(split_ratio))
)
)
graphs_split_i = (
self.graphs[
split_offset:split_offset + num_split_i
]
)
split_offset += num_split_i
else:
graphs_split_i = self.graphs[split_offset:]
split_graphs.append(graphs_split_i)
# create objectives for link_pred task
if self.task == "link_pred":
# if disjoint, this will split all graph's edges into 2:
# message passing and objective edges
# which is returned by the [1] of the split graphs
if self.edge_train_mode == "disjoint":
split_start = 0
# in all mode, train graph has all edges used for both
# message passing and objective.
elif self.edge_train_mode == "all":
split_start = 1
for i in range(split_start, len(split_graphs)):
for j in range(len(split_graphs[i])):
graph_temp = split_graphs[i][j]
if isinstance(graph_temp, Graph):
# store the original edge_label
graph_edge_label = None
if (
self.resample_disjoint
and (i == 0)
and hasattr(graph_temp, "edge_label")
):
graph_edge_label = graph_temp.edge_label
if isinstance(graph_temp, HeteroGraph):
graph_temp = (
graph_temp.split_link_pred(
split_types,
self.edge_message_ratio,
self.edge_split_mode
)[1]
)
else:
graph_temp = (
graph_temp.split_link_pred(
self.edge_message_ratio
)[1]
)
# save the original edge_label
if graph_edge_label is not None:
graph_temp._edge_label = (
copy.deepcopy(graph_edge_label)
)
else:
graph_temp._edge_label = None
# set is_train flag
if i == 0:
graph_temp._is_train = True
split_graphs[i][j] = graph_temp
else:
raise TypeError(
"element in self.graphs of unexpected type."
)
# list of num_splits datasets
dataset_return = []
for graphs in split_graphs:
dataset_current = copy.copy(self)
dataset_current.graphs = graphs
if self.task == "link_pred":
for graph_temp in dataset_current.graphs:
if isinstance(graph_temp, Graph):
if isinstance(graph_temp, HeteroGraph):
graph_temp._create_neg_sampling(
negative_sampling_ratio=(
self.edge_negative_sampling_ratio
),
split_types=split_types
)
else:
graph_temp._create_neg_sampling(
self.edge_negative_sampling_ratio
)
else:
raise TypeError(
"element in self.graphs of unexpected type"
)
dataset_return.append(dataset_current)
# resample negatives for train split (only for link prediction)
dataset_return[0].resample_negatives = True
return dataset_return
def split(
self,
transductive: bool = True,
split_ratio: List[float] = None,
split_types: Union[str, List[str]] = None,
shuffle: bool = True
) -> List[Graph]:
r"""
Split the dataset into train, validation (and test) sets.
Args:
transductive (bool): Whether the learning is transductive
(`True`) or inductive (`False`). Inductive split is
always used for the graph-level task, :obj:`self.task`
equals to `graph`.
split_ratio (list): A list of ratios such as
`[train_ratio, validation_ratio, test_ratio]`.
            split_types (str or list): Types to split on. Default is `None`.
shuffle (bool): Whether to shuffle data for the splitting.
Returns:
list: A list of 3 (2) :class:`deepsnap.dataset.GraphDataset`
objects corresponding to the train, validation (and test) sets.
"""
if self.graphs is None:
raise RuntimeError(
"Split is not supported for on-the-fly dataset. "
"Construct different on-the-fly datasets for train, val "
"and test. Or perform split at batch level."
)
if split_ratio is None:
split_ratio = [0.8, 0.1, 0.1]
if not isinstance(split_ratio, list):
raise TypeError("Split ratio must be a list.")
if len(split_ratio) > 3:
raise ValueError(
"Split ratio must contain less than or equal to three values."
)
if not math.isclose(sum(split_ratio), 1.0):
raise ValueError("Split ratio must sum up to 1.")
if not all(
isinstance(split_ratio_i, float)
for split_ratio_i in split_ratio
):
raise TypeError("Split ratio must contain all floats.")
if not all(split_ratio_i > 0 for split_ratio_i in split_ratio):
raise ValueError("Split ratio must contain all positivevalues.")
# store the most recent split types
self._split_types = split_types
# check self._is_tensor
if self._is_tensor:
for graph in self.graphs:
graph.edge_label_index = graph._edge_label_index
graph.edge_label = graph._edge_label
# list of num_splits datasets
dataset_return = []
if transductive:
if self.task == "graph":
raise ValueError(
"in transductive mode, self.task is graph does not "
"make sense."
)
dataset_return = (
self._split_transductive(
split_ratio, split_types, shuffle=shuffle
)
)
else:
dataset_return = (
self._split_inductive(
split_ratio,
split_types,
shuffle=shuffle
)
)
return dataset_return
def resample_disjoint(self):
r"""
Resample splits of the message passing and supervision edges in the
`disjoint` mode.
.. note::
If :meth:`apply_transform` (on the message passing graph)
was used before this resampling, it needs to be
re-applied after resampling, to update some of the (supervision)
edges that were in the objectives.
"""
if self.graphs is None:
raise RuntimeError(
"Resampling disjoint is not needed for on-the-fly dataset. "
"Split the on-the-fly data as the batch arrives."
)
graphs = []
for graph in self.graphs:
graphs.append(graph.resample_disjoint(self.edge_message_ratio))
self.graphs = graphs
def _reset_cache(self):
r"""
Resets internal cache for graph examples, num_node_labels etc.
"""
self._num_node_labels = None
self._num_nodes = None
self._num_edge_labels = None
self._num_edges = None
self._num_graph_labels = None
# TODO: consider the heterogeneous graph case
if self.graphs is None:
self._graph_example = self.generator.generate()
if not isinstance(self._graph_example, Graph):
self._graph_example = Graph(self._graph_example)
else:
self._graph_example = self.graphs[0]
def apply_transform(
self, transform,
update_tensor: bool = True,
update_graph: bool = False,
deep_copy: bool = False,
**kwargs
):
r"""
Applies transformation to all graph objects. All graphs in
:obj:`self.graphs` will be run by the specified
:meth:`transform` function, and then a new
:class:`GraphDataset` object will be returned.
Args:
transform (callable): User-defined transformation function.
update_tensor (bool): If the graphs have changed, use the
graph to update the stored tensor attributes.
update_graph (bool): If the tensor attributes have changed,
use the attributes to update the graphs.
deep_copy (bool): If `True`, all graphs will be deepcopied
and then fed into the :meth:`transform` function.
In this case, the :meth:`transform` function also might
need to return a `Graph` object.
**kwargs (optional): Parameters used in the :meth:`transform` function
for each `Graph` object.
Returns:
:class:`GraphDataset`: A new :class:`GraphDataset` object with
transformed graphs.
"""
# currently does not support transform for on-the-fly dataset
if self.graphs is None:
raise ValueError(
"On-the-fly datasets do not support transform. "
"Transform can be done at the batch level."
)
# TODO: parallel apply
new_dataset = copy.copy(self)
new_dataset.graphs = [
graph.apply_transform(
transform, update_tensor, update_graph,
deep_copy, **kwargs
)
for graph in self.graphs
]
# update example graph used for num_node_features etc.
new_dataset._reset_cache()
return new_dataset
def filter(self, filter_fn, deep_copy: bool = False, **kwargs):
r"""
Filter the graphs in the dataset. Discarding a graph `G`
when `filter_fn(G)` is `False`. :meth:`apply_transform` is an
analog of the Python `map` function, while :meth:`filter`
is an analog of the Python `filter` function.
Args:
filter_fn: User-defined filter function that returns `True`
(keep) or `False` (discard) the graph object in
the dataset.
deep_copy: If `True`, all graphs will be deepcopied and
then fed into the :meth:`filter` function.
**kwargs: Parameters used in the :meth:`filter` function.
Returns:
:class:`GraphDataset`: A new :class:`GraphDataset` object with
graphs filtered.
"""
# currently does not support filter for on-the-fly dataset
if self.graphs is None:
raise ValueError(
"On-the-fly datasets do not support transform."
"Filter can be done at the batch level."
)
new_dataset = copy.copy(self)
new_dataset.graphs = [
graph for graph in self.graphs if filter_fn(graph, **kwargs)]
# update example graph used for num_node_features etc.
new_dataset._reset_cache()
return new_dataset
def to(self, device):
r"""
Transfer the graphs in the dataset to specified device.
Args:
device (str): Specified device name, such as `cpu` or
`cuda`.
"""
if self.graphs is None:
self.otf_device = device
else:
for graph in self.graphs:
graph.to(device)
def _shuffle(self):
r"""
        Shuffle the Graph objects in self.graphs.
"""
if self.graphs is not None:
random.shuffle(self.graphs)
@staticmethod
def pyg_to_graphs(
dataset,
verbose: bool = False,
fixed_split: bool = False,
tensor_backend: bool = False,
netlib=None
) -> List[Graph]:
r"""
Transform a :class:`torch_geometric.data.Dataset` object to a
        list of :class:`deepsnap.graph.Graph` objects.
Args:
dataset (:class:`torch_geometric.data.Dataset`): A
:class:`torch_geometric.data.Dataset` object that will be
                transformed to a list of :class:`deepsnap.graph.Graph`
objects.
verbose (bool): Whether to print information such as warnings.
fixed_split (bool): Whether to load the fixed data split from
the original PyTorch Geometric dataset.
tensor_backend (bool): `True` will use pure tensors for graphs.
netlib (types.ModuleType, optional): The graph backend module.
Currently DeepSNAP supports the NetworkX and SnapX (for
SnapX only the undirected homogeneous graph) as the graph
backend. Default graph backend is the NetworkX.
Returns:
list: A list of :class:`deepsnap.graph.Graph` objects.
"""
if fixed_split:
graphs = [
Graph.pyg_to_graph(
data, verbose=verbose, fixed_split=True,
tensor_backend=tensor_backend, netlib=netlib
)
for data in dataset
]
graphs_split = [[graph] for graph in graphs[0]]
return graphs_split
else:
return [
Graph.pyg_to_graph(
data, verbose=verbose,
tensor_backend=tensor_backend,
netlib=netlib
)
for data in dataset
]
def __getitem__(self, idx: int) -> Union[Graph, List[Graph]]:
r"""
        Takes in an integer (or a list of integers) and
        returns a single Graph object (or a subset of graphs).
Args:
idx: index to be selected from graphs.
Returns:
Union[:class:`deepsnap.graph.Graph`,
List[:class:`deepsnap.graph.Graph`]]: A single
:class:`deepsnap.graph.Graph` object or subset
of :class:`deepsnap.graph.Graph` objects.
"""
# TODO: add the hetero graph equivalent of these functions
if self.graphs is None:
graph = self.generator.generate()
if not isinstance(graph, Graph):
graph = Graph(graph)
            # the generator produced a networkx graph (wrapped into Graph above)
if self.otf_device is not None:
graph.to(self.otf_device)
elif isinstance(idx, int):
graph = self.graphs[idx]
else:
            # slicing of dataset
dataset = self._index_select(idx)
return dataset
# resample disjoint training data only when the task is
# disjoint link_pred and self.resample_disjoint is set to True
if (
self.task == "link_pred"
and self.edge_train_mode == "disjoint"
and self.resample_disjoint
and graph._is_train
):
if not hasattr(graph, "resample_disjoint_period"):
graph.resample_disjoint_period = self.resample_disjoint_period
if isinstance(graph, Graph):
if isinstance(graph, HeteroGraph):
graph = graph.resample_disjoint(
split_types=self._split_types,
message_ratio=self.edge_message_ratio
)
else:
graph = graph.resample_disjoint(self.edge_message_ratio)
else:
raise TypeError(
"element in self.graphs of unexpected type."
)
if self.task == "link_pred" and self.resample_negatives:
resample_negative_flag = True
            # if the graph has just resampled its disjoint training data,
            # graph.edge_label was reset to its original state, so the
            # negative sampling below must run with resample=False.
if (
hasattr(graph, "_resample_disjoint_flag")
and graph._resample_disjoint_flag
):
resample_negative_flag = False
# resample negative examples
if isinstance(graph, Graph):
if isinstance(graph, HeteroGraph):
if self.negative_edges_mode == "random":
graph._create_neg_sampling(
self.edge_negative_sampling_ratio,
split_types=self._split_types,
resample=resample_negative_flag
)
elif self.negative_edges_mode == "custom":
graph._custom_create_neg_sampling(
self.edge_negative_sampling_ratio,
split_types=self._split_types,
resample=resample_negative_flag
)
else:
if self.negative_edges_mode == "random":
graph._create_neg_sampling(
self.edge_negative_sampling_ratio,
resample=resample_negative_flag
)
elif self.negative_edges_mode == "custom":
graph._custom_create_neg_sampling(
self.edge_negative_sampling_ratio,
resample=resample_negative_flag
)
else:
raise TypeError(
"element in self.graphs of unexpected type."
)
if self.graphs is not None and isinstance(idx, int):
self.graphs[idx] = graph
return graph
def __setitem__(self, key: str, value):
"""Sets the attribute :obj:`key` to :obj:`value`."""
setattr(self, key, value)
def _index_select(self, idx: int) -> List[Graph]:
r"""
        Takes in a list of integers and returns a subset of graphs
corresponding to the list of integers.
Args:
idx: index to be selected from graphs.
Returns:
List[Graph]: a single Graph object or subset of graphs.
"""
if self.graphs is None:
# _index_select is only called when self.graphs is not None
raise NotImplementedError(
"Index select is not available for on-the-fly dataset."
)
if isinstance(idx, slice):
dataset = copy.copy(self)
dataset.graphs = self.graphs[idx]
elif torch.is_tensor(idx):
if (
idx.dtype == torch.long
or idx.dtype == torch.int
):
dataset = self._index_select(idx.tolist())
elif idx.dtype == torch.bool:
dataset = self._index_select(idx.nonzero().flatten().tolist())
else:
raise TypeError(
f"your index type is {idx.dtype}, only tensor of type "
"torch.long, torch.int or torch.bool are accepted."
)
elif isinstance(idx, list) or isinstance(idx, tuple):
dataset = copy.copy(self)
dataset.graphs = [self.graphs[x] for x in idx]
else:
raise IndexError(
"Only integers, slices (`:`), list, tuples, and long or bool "
f"tensors are valid indices (got {type(idx).__name__})."
)
return dataset
def __repr__(self) -> str: # pragma: no cover
descriptor = (
len(self) if self.graphs is not None else self.generator.__class__
)
return f"{self.__class__.__name__}({descriptor})"
|
tests/tracer/runtime/test_container.py | p7g/dd-trace-py | 308 | 11139277 | import mock
import pytest
from ddtrace.internal.compat import PY2
from ddtrace.internal.runtime.container import CGroupInfo
from ddtrace.internal.runtime.container import get_container_info
from .utils import cgroup_line_valid_test_cases
# Map expected Py2 exception to Py3 name
if PY2:
FileNotFoundError = IOError # noqa: A001
def get_mock_open(read_data=None):
mock_open = mock.mock_open(read_data=read_data)
return mock.patch("ddtrace.internal.runtime.container.open", mock_open)
def test_cgroup_info_init():
# Assert default all attributes to `None`
info = CGroupInfo()
for attr in ("id", "groups", "path", "container_id", "controllers", "pod_id"):
assert getattr(info, attr) is None
# Assert init with property sets property
info = CGroupInfo(container_id="test-container-id")
assert info.container_id == "test-container-id"
@pytest.mark.parametrize(
"line,expected_info",
# Valid generated cases + one off cases
cgroup_line_valid_test_cases()
+ [
# Valid, extra spaces
(
" 13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 ",
CGroupInfo(
id="13",
groups="name=systemd",
controllers=["name=systemd"],
path="/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
container_id="3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
pod_id=None,
),
),
# Valid, bookended newlines
(
"\r\n13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860\r\n",
CGroupInfo(
id="13",
groups="name=systemd",
controllers=["name=systemd"],
path="/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
container_id="3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
pod_id=None,
),
),
# Valid, fargate >= 1.4.0
(
"1:name=systemd:/ecs/34dc0b5e626f2c5c4c5170e34b10e765-1234567890",
CGroupInfo(
id="1",
groups="name=systemd",
controllers=["name=systemd"],
path="/ecs/34dc0b5e626f2c5c4c5170e34b10e765-1234567890",
container_id="34dc0b5e626f2c5c4c5170e34b10e765-1234567890",
pod_id=None,
),
),
# Invalid container_ids
(
# One character too short
"13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f86986",
CGroupInfo(
id="13",
groups="name=systemd",
controllers=["name=systemd"],
path="/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f86986",
container_id=None,
pod_id=None,
),
),
(
# Non-hex
"13:name=systemd:/docker/3726184226f5d3147c25fzyxw5b60097e378e8a720503a5e19ecfdf29f869860",
CGroupInfo(
id="13",
groups="name=systemd",
controllers=["name=systemd"],
path="/docker/3726184226f5d3147c25fzyxw5b60097e378e8a720503a5e19ecfdf29f869860",
container_id=None,
pod_id=None,
),
),
# Invalid id
(
# non-digit
"a:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
None,
),
(
# missing
":name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
None,
),
# Missing group
(
# empty
"13::/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
CGroupInfo(
id="13",
groups="",
controllers=[],
path="/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
container_id="3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
pod_id=None,
),
),
(
# missing
"13:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
None,
),
# Empty line
(
"",
None,
),
],
)
def test_cgroup_info_from_line(line, expected_info):
info = CGroupInfo.from_line(line)
info2 = CGroupInfo.from_line(line)
# Check __eq__
assert info == info2
if expected_info is None:
assert info is None, line
else:
for attr in ("id", "groups", "path", "container_id", "controllers", "pod_id"):
assert getattr(info, attr) == getattr(expected_info, attr), line
@pytest.mark.parametrize(
"file_contents,container_id",
(
# Docker file
(
"""
13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
12:pids:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
11:hugetlb:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
10:net_prio:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
9:perf_event:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
8:net_cls:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
7:freezer:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
6:devices:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
5:memory:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
4:blkio:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
3:cpuacct:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
2:cpu:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
1:cpuset:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860
""",
"3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860",
),
# k8s file
(
"""
11:perf_event:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
10:pids:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
9:memory:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
8:cpu,cpuacct:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
7:blkio:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
6:cpuset:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
5:devices:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
4:freezer:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
3:net_cls,net_prio:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
2:hugetlb:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
            1:name=systemd:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1
            """,
            "3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1",
),
        # k8s format with additional characters before the task ID
(
"""
1:name=systemd:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d3da189_6407_48e3_9ab6_78188d75e609.slice/docker-7b8952daecf4c0e44bbcefe1b5c5ebc7b4839d4eefeccefe694709d3809b6199.scope
""",
"7b8952daecf4c0e44bbcefe1b5c5ebc7b4839d4eefeccefe694709d3809b6199",
),
# ECS file
(
"""
9:perf_event:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
8:memory:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
7:hugetlb:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
6:freezer:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
5:devices:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
4:cpuset:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
3:cpuacct:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
2:cpu:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
1:blkio:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce
""",
"38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce",
),
# Fargate file < 1.4.0
(
"""
11:hugetlb:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
10:pids:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
9:cpuset:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
8:net_cls,net_prio:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
7:cpu,cpuacct:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
6:perf_event:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
5:freezer:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
4:devices:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
3:blkio:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
2:memory:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
1:name=systemd:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da
""",
"432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da",
),
# Fargate file >= 1.4.0
(
"""
11:hugetlb:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
10:pids:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
9:cpuset:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
8:net_cls,net_prio:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
7:cpu,cpuacct:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
6:perf_event:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
5:freezer:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
4:devices:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
3:blkio:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
2:memory:/ecs/55091c13-b8cf-4801-b527-f4601742204d/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
1:name=systemd:/ecs/34dc0b5e626f2c5c4c5170e34b10e765-1234567890
""",
"34dc0b5e626f2c5c4c5170e34b10e765-1234567890",
),
# Linux non-containerized file
(
"""
11:blkio:/user.slice/user-0.slice/session-14.scope
10:memory:/user.slice/user-0.slice/session-14.scope
9:hugetlb:/
8:cpuset:/
7:pids:/user.slice/user-0.slice/session-14.scope
6:freezer:/
5:net_cls,net_prio:/
4:perf_event:/
3:cpu,cpuacct:/user.slice/user-0.slice/session-14.scope
2:devices:/user.slice/user-0.slice/session-14.scope
1:name=systemd:/user.slice/user-0.slice/session-14.scope
""",
None,
),
# Empty file
(
"",
None,
),
# Missing file
(
None,
None,
),
),
)
def test_get_container_info(file_contents, container_id):
with get_mock_open(read_data=file_contents) as mock_open:
# simulate the file not being found
if file_contents is None:
mock_open.side_effect = FileNotFoundError
info = get_container_info()
if container_id is None:
assert info is None
else:
assert info.container_id == container_id
mock_open.assert_called_once_with("/proc/self/cgroup", mode="r")
@pytest.mark.parametrize(
"pid,file_name",
(
("13", "/proc/13/cgroup"),
(13, "/proc/13/cgroup"),
("self", "/proc/self/cgroup"),
),
)
def test_get_container_info_with_pid(pid, file_name):
# DEV: We need at least 1 line for the loop to call `CGroupInfo.from_line`
with get_mock_open(read_data="\r\n") as mock_open:
assert get_container_info(pid=pid) is None
mock_open.assert_called_once_with(file_name, mode="r")
@mock.patch("ddtrace.internal.runtime.container.CGroupInfo.from_line")
@mock.patch("ddtrace.internal.runtime.container.log")
def test_get_container_info_exception(mock_log, mock_from_line):
exception = Exception()
mock_from_line.side_effect = exception
# DEV: We need at least 1 line for the loop to call `CGroupInfo.from_line`
with get_mock_open(read_data="\r\n") as mock_open:
# Assert calling `get_container_info()` does not bubble up the exception
assert get_container_info() is None
# Assert we called everything we expected
mock_from_line.assert_called_once_with("\r\n")
mock_open.assert_called_once_with("/proc/self/cgroup", mode="r")
# Ensure we logged the exception
mock_log.debug.assert_called_once_with("Failed to parse cgroup file for pid %r", "self", exc_info=True)
|
scattertext/representations/CorpusSentenceIterator.py | shettyprithvi/scattertext | 1,823 | 11139283 | <gh_stars>1000+
import itertools
from scattertext import ParsedCorpus
class CorpusSentenceIterator(object):
@staticmethod
def get_sentences(corpus):
'''
Parameters
----------
corpus, ParsedCorpus
Returns
-------
iter: [sentence1word1, ...], [sentence2word1, ...]
'''
assert isinstance(corpus, ParsedCorpus)
return itertools.chain(*[[[corpus._term_idx_store.getidxstrict(t.lower_) for t in sent
if not t.is_punct]
for sent in doc.sents]
                                  for doc in corpus.get_parsed_docs()])
|
tests/modeling/test_modeling_model_ema.py | wenliangzhao2018/d2go | 687 | 11139288 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import itertools
import unittest
import d2go.runner.default_runner as default_runner
import torch
from d2go.modeling import model_ema
from d2go.utils.testing import helper
class TestArch(torch.nn.Module):
def __init__(self, value=None, int_value=None):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
if value is not None:
self.set_const_weights(value, int_value)
def forward(self, x):
ret = self.conv(x)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
return ret
def set_const_weights(self, value, int_value=None):
if int_value is None:
int_value = int(value)
for x in itertools.chain(self.parameters(), self.buffers()):
if x.dtype == torch.float32:
x.data.fill_(value)
else:
x.data.fill_(int_value)
def _compare_state_dict(model1, model2, abs_error=1e-3):
sd1 = model1.state_dict()
sd2 = model2.state_dict()
if len(sd1) != len(sd2):
return False
if set(sd1.keys()) != set(sd2.keys()):
return False
for name in sd1:
if sd1[name].dtype == torch.float32:
if torch.abs((sd1[name] - sd2[name])).max() > abs_error:
return False
elif (sd1[name] != sd2[name]).any():
return False
return True
class TestModelingModelEMA(unittest.TestCase):
def test_emastate(self):
model = TestArch()
state = model_ema.EMAState.FromModel(model)
# two for conv (conv.weight, conv.bias),
# five for bn (bn.weight, bn.bias, bn.running_mean, bn.running_var, bn.num_batches_tracked)
self.assertEqual(len(state.state), 7)
for _, val in state.state.items():
self.assertFalse(val.requires_grad)
model1 = TestArch()
self.assertFalse(_compare_state_dict(model, model1))
state.apply_to(model1)
self.assertTrue(_compare_state_dict(model, model1))
def test_emastate_saveload(self):
model = TestArch()
state = model_ema.EMAState.FromModel(model)
model1 = TestArch()
self.assertFalse(_compare_state_dict(model, model1))
state1 = model_ema.EMAState()
state1.load_state_dict(state.state_dict())
state1.apply_to(model1)
self.assertTrue(_compare_state_dict(model, model1))
@helper.skip_if_no_gpu
def test_emastate_crossdevice(self):
model = TestArch()
model.cuda()
# state on gpu
state = model_ema.EMAState.FromModel(model)
self.assertEqual(state.device, torch.device("cuda:0"))
# target model on cpu
model1 = TestArch()
state.apply_to(model1)
self.assertEqual(next(model1.parameters()).device, torch.device("cpu"))
self.assertTrue(_compare_state_dict(copy.deepcopy(model).cpu(), model1))
# state on cpu
state1 = model_ema.EMAState.FromModel(model, device="cpu")
self.assertEqual(state1.device, torch.device("cpu"))
# target model on gpu
model2 = TestArch()
model2.cuda()
state1.apply_to(model2)
self.assertEqual(next(model2.parameters()).device, torch.device("cuda:0"))
self.assertTrue(_compare_state_dict(model, model2))
def test_ema_updater(self):
model = TestArch()
state = model_ema.EMAState()
updated_model = TestArch()
updater = model_ema.EMAUpdater(state, decay=0.0)
updater.init_state(model)
for _ in range(3):
cur = TestArch()
updater.update(cur)
state.apply_to(updated_model)
# weight decay == 0.0, always use new model
self.assertTrue(_compare_state_dict(updated_model, cur))
updater = model_ema.EMAUpdater(state, decay=1.0)
updater.init_state(model)
for _ in range(3):
cur = TestArch()
updater.update(cur)
state.apply_to(updated_model)
# weight decay == 1.0, always use init model
self.assertTrue(_compare_state_dict(updated_model, model))
def test_ema_updater_decay(self):
state = model_ema.EMAState()
updater = model_ema.EMAUpdater(state, decay=0.7)
updater.init_state(TestArch(1.0))
gt_val = 1.0
gt_val_int = 1
for idx in range(3):
updater.update(TestArch(float(idx)))
updated_model = state.get_ema_model(TestArch())
gt_val = gt_val * 0.7 + float(idx) * 0.3
gt_val_int = int(gt_val_int * 0.7 + float(idx) * 0.3)
self.assertTrue(
_compare_state_dict(updated_model, TestArch(gt_val, gt_val_int))
)
class TestModelingModelEMAHook(unittest.TestCase):
def test_ema_hook(self):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL_EMA.ENABLED = True
# use new model weights
cfg.MODEL_EMA.DECAY = 0.0
model = TestArch()
model_ema.may_build_model_ema(cfg, model)
self.assertTrue(hasattr(model, "ema_state"))
ema_hook = model_ema.EMAHook(cfg, model)
ema_hook.before_train()
ema_hook.before_step()
model.set_const_weights(2.0)
ema_hook.after_step()
ema_hook.after_train()
ema_checkpointers = model_ema.may_get_ema_checkpointer(cfg, model)
self.assertEqual(len(ema_checkpointers), 1)
out_model = TestArch()
ema_checkpointers["ema_state"].apply_to(out_model)
self.assertTrue(_compare_state_dict(out_model, model))
|
botocore/retries/special.py | doc-E-brown/botocore | 1,738 | 11139294 | <gh_stars>1000+
"""Special cased retries.
These are additional retry cases we still have to handle from the legacy
retry handler. They don't make sense as part of the standard mode retry
module. Ideally we should be able to remove this module.
"""
import logging
from binascii import crc32
from botocore.retries.base import BaseRetryableChecker
logger = logging.getLogger(__name__)
# TODO: This is an ideal candidate for the retryable trait once that's
# available.
class RetryIDPCommunicationError(BaseRetryableChecker):
_SERVICE_NAME = 'sts'
def is_retryable(self, context):
service_name = context.operation_model.service_model.service_name
if service_name != self._SERVICE_NAME:
return False
error_code = context.get_error_code()
return error_code == 'IDPCommunicationError'
class RetryDDBChecksumError(BaseRetryableChecker):
_CHECKSUM_HEADER = 'x-amz-crc32'
_SERVICE_NAME = 'dynamodb'
def is_retryable(self, context):
service_name = context.operation_model.service_model.service_name
if service_name != self._SERVICE_NAME:
return False
if context.http_response is None:
return False
checksum = context.http_response.headers.get(self._CHECKSUM_HEADER)
if checksum is None:
return False
actual_crc32 = crc32(context.http_response.content) & 0xffffffff
if actual_crc32 != int(checksum):
logger.debug("DynamoDB crc32 checksum does not match, "
"expected: %s, actual: %s", checksum, actual_crc32)
return True
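if __name__ == "__main__":
    # Illustrative sketch only (not part of botocore): exercise RetryDDBChecksumError
    # with hypothetical SimpleNamespace stand-ins for the real retry context,
    # operation model and HTTP response objects.
    from types import SimpleNamespace
    body = b'{"Count": 0}'
    bad_header = str((crc32(body) + 1) & 0xffffffff)  # deliberately wrong checksum
    context = SimpleNamespace(
        operation_model=SimpleNamespace(
            service_model=SimpleNamespace(service_name='dynamodb')),
        http_response=SimpleNamespace(
            headers={'x-amz-crc32': bad_header}, content=body),
    )
    # The checksum mismatch makes the checker request a retry.
    print(RetryDDBChecksumError().is_retryable(context))  # True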
|
menpo/io/input/__init__.py | apapaion/menpo | 311 | 11139296 | <gh_stars>100-1000
from .base import (
import_image,
import_images,
image_paths,
import_video,
import_videos,
video_paths,
import_landmark_file,
import_landmark_files,
landmark_file_paths,
import_pickle,
import_pickles,
pickle_paths,
import_builtin_asset,
menpo_data_path_to as data_path_to,
menpo_data_dir_path as data_dir_path,
menpo_ls_builtin_assets as ls_builtin_assets,
register_image_importer,
register_landmark_importer,
register_pickle_importer,
register_video_importer,
same_name,
same_name_video,
resolve_from_paths,
)
|
fbpic/particles/elementary_process/compton/inline_functions.py | fractional-ray/fbpic | 131 | 11139325 | <reponame>fractional-ray/fbpic
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines inline functions that are used both on GPU and CPU, and
used in the Compton scattering code.
These functions are compiled for GPU or CPU respectively, when imported
into the files numba_methods.py and cuda_methods.py respectively.
"""
import math
from scipy.constants import c, m_e, physical_constants
# Get additional useful constants
r_e = physical_constants['classical electron radius'][0]
PI_RE_2 = math.pi * r_e**2
INV_MC = 1./( m_e*c )
def lorentz_transform( p_in, px_in, py_in, pz_in, gamma, beta, nx, ny, nz ):
"""
Perform a Lorentz transform of the 4-momentum (p_in, px_in, py_in, pz_in)
and return the results
Parameters
----------
p_in, px_in, py_in, pz_in: floats
The coordinates of the 4-momentum
gamma, beta: floats
Lorentz factor and corresponding beta of the Lorentz transform
nx, ny, nz: floats
Coordinates of *normalized* vector that indicates
the direction of the transform
"""
p_parallel_in = nx*px_in + ny*py_in + nz*pz_in
p_out = gamma * ( p_in - beta * p_parallel_in )
p_parallel_out = gamma * ( p_parallel_in - beta * p_in )
px_out = px_in + nx * ( p_parallel_out - p_parallel_in )
py_out = py_in + ny * ( p_parallel_out - p_parallel_in )
pz_out = pz_in + nz * ( p_parallel_out - p_parallel_in )
return( p_out, px_out, py_out, pz_out )
def get_scattering_probability(
dt, elec_ux, elec_uy, elec_uz, elec_inv_gamma,
photon_n, photon_p, photon_beta_x, photon_beta_y, photon_beta_z ):
"""
Return the probability of Comton scattering, for a given electron,
during `dt` (taken in the frame of the simulation).
The actual calculation is done in the rest frame of the electron,
in order to apply the Klein-Nishina formula ; therefore `dt` is converted
to the corresponding proper time, and the properties of the incoming photon
flux are Lorentz-transformed.
Parameters:
-----------
dt: float (in seconds)
Time interval considered, in the frame of the simulation.
elec_ux, elec_uy, elec_uz, elec_inv_gamma: floats (dimensionless)
The momenta and inverse gamma factor of the emitting electron
(in the frame of the simulation)
photon_n, photon_p, photon_beta_x, photon_beta_y, photon_beta_z
Properties of the photon flux (in the frame of the simulation)
"""
# Get electron intermediate variable
elec_gamma = 1./elec_inv_gamma
# Get photon density and momentum in the rest frame of the electron
transform_factor = elec_gamma \
- elec_ux*photon_beta_x - elec_uy*photon_beta_y - elec_uz*photon_beta_z
photon_n_rest = photon_n * transform_factor
photon_p_rest = photon_p * transform_factor
# Calculate the Klein-Nishina cross-section
k = photon_p_rest * INV_MC
f1 = 2 * ( 2 + k*(1+k)*(8+k) ) / ( k**2 * (1 + 2*k)**2 )
f2 = ( 2 + k*(2-k) ) * math.log( 1 + 2*k ) / k**3
sigma = PI_RE_2 * ( f1 - f2 )
# Get the electron proper time
proper_dt_rest = dt * elec_inv_gamma
# Calculate the probability of scattering
p = 1 - math.exp( - sigma * photon_n_rest * c * proper_dt_rest )
return( p )
def get_photon_density_gaussian(
elec_x, elec_y, elec_z, ct, photon_n_lab_max, inv_laser_waist2,
inv_laser_ctau2, laser_initial_z0, gamma_boost, beta_boost ):
"""
Get the photon density in the scattering Gaussian laser pulse,
at the position of a given electron, and at the current time.
Parameters
----------
elec_x, elec_y, elec_z: floats
The position of the given electron (in the frame of the simulation)
ct: float
Current time in the simulation frame (multiplied by c)
photon_n_lab_max: float
Peak photon density (in the lab frame)
(i.e. at the peak of the Gaussian pulse)
inv_laser_waist2, inv_laser_ctau2, laser_initial_z0: floats
Properties of the Gaussian laser pulse (in the lab frame)
gamma_boost, beta_boost: floats
Properties of the Lorentz boost between the lab and simulation frame.
Returns
-------
photon_n_sim: float
The photon density in the frame of the simulation
"""
# Transform electrons coordinates from simulation frame to lab frame
elec_zlab = gamma_boost*( elec_z + beta_boost*ct )
elec_ctlab = gamma_boost*( ct + beta_boost*elec_z )
# Get photon density *in the lab frame*
photon_n_lab = photon_n_lab_max * math.exp(
- 2*inv_laser_waist2*( elec_x**2 + elec_y**2 ) \
- 2*inv_laser_ctau2*(elec_zlab - laser_initial_z0 + elec_ctlab)**2 )
# Get photon density *in the simulation frame*
photon_n_sim = gamma_boost*photon_n_lab*( 1 + beta_boost)
return( photon_n_sim )
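if __name__ == "__main__":
    # Small sanity check, not part of FB-PIC: boost a photon 4-momentum along z and
    # verify that the mass-shell invariant stays (numerically) zero. The boost
    # parameters below are arbitrary illustrative values.
    gamma = 2.0
    beta = math.sqrt( 1. - 1./gamma**2 )
    p, px, py, pz = lorentz_transform( 1., 0., 0., 1., gamma, beta, 0., 0., 1. )
    print( "boosted 4-momentum:", p, px, py, pz )
    print( "invariant p^2 - |p|^2:", p**2 - (px**2 + py**2 + pz**2) )  # ~0 for a photon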
|
qiskit/utils/classtools.py | Roshan-Thomas/qiskit-terra | 1,456 | 11139334 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tools useful for creating decorators, and other high-level callables."""
import functools
import inspect
import types
from typing import Type, Callable
# On user-defined classes, `__new__` is magically inferred to be a staticmethod, `__init_subclass__`
# is magically inferred to be a class method and `__prepare__` must be defined as a classmethod, but
# the CPython types implemented in C (such as `object` and `type`) are `types.BuiltinMethodType`,
# which we can't distinguish properly, so we need a little magic.
_MAGIC_STATICMETHODS = {"__new__"}
_MAGIC_CLASSMETHODS = {"__init_subclass__", "__prepare__"}
# `type` itself has several methods (mostly dunders). When we are wrapping those names, we need to
# make sure that we don't interfere with `type.__getattribute__`'s handling that circumvents the
# normal inheritance rules when appropriate.
_TYPE_METHODS = set(dir(type))
class _lift_to_method: # pylint: disable=invalid-name
"""A decorator that ensures that an input callable object implements ``__get__``. It is
returned unchanged if so, otherwise it is turned into the default implementation for functions,
which makes them bindable to instances.
Python-space functions and lambdas already have this behaviour, but builtins like ``print``
don't; using this class allows us to do::
wrap_method(MyClass, "maybe_mutates_arguments", before=print, after=print)
to simply print all the arguments on entry and exit of the function, which otherwise wouldn't be
valid, since ``print`` isn't a descriptor.
"""
__slots__ = ("_method",)
def __new__(cls, method):
if hasattr(method, "__get__"):
return method
return super().__new__(cls)
def __init__(self, method):
if method is self:
# Prevent double-initialisation if we are passed an instance of this object to lift.
return
self._method = method
def __get__(self, obj, objtype):
# This is effectively the same implementation as `types.FunctionType.__get__`, but we can't
# bind that directly because it also includes a requirement that its `self` reference is of
# the correct type, and this isn't.
if obj is None:
return self._method
return types.MethodType(self._method, obj)
class _WrappedMethod:
"""Descriptor which calls its two arguments in succession, correctly handling instance- and
class-method calls.
    It is intended that this class will replace the attribute that ``method`` previously was on a
    class or instance.  When accessed as that attribute, this descriptor will behave as if it is the
    same function call, but with the ``before`` and ``after`` callables invoked around it.
"""
__slots__ = ("_method_decorator", "_method_has_get", "_method", "_before", "_after")
def __init__(self, method, before=None, after=None):
if isinstance(method, (classmethod, staticmethod)):
self._method_decorator = type(method)
elif isinstance(method, type(self)):
self._method_decorator = method._method_decorator
elif getattr(method, "__name__", None) in _MAGIC_STATICMETHODS:
self._method_decorator = staticmethod
elif getattr(method, "__name__", None) in _MAGIC_CLASSMETHODS:
self._method_decorator = classmethod
else:
self._method_decorator = _lift_to_method
before = (self._method_decorator(before),) if before is not None else ()
after = (self._method_decorator(after),) if after is not None else ()
if isinstance(method, type(self)):
self._method = method._method
self._before = before + method._before
self._after = method._after + after
else:
self._before = before
self._after = after
self._method = method
# If the inner method doesn't have `__get__` (like some builtin methods), it's faster to
# test a Boolean each time than the repeatedly raise and catch an exception, which is what
# `hasattr` does.
self._method_has_get = hasattr(self._method, "__get__")
def __get__(self, obj, objtype=None):
# `self._method` doesn't invoke the `_method` descriptor (if it is one) because that only
# happens for class variables. Here it's an instance variable, so we can pass through `obj`
# and `objtype` correctly like this.
method = self._method.__get__(obj, objtype) if self._method_has_get else self._method
@functools.wraps(method)
def out(*args, **kwargs):
for callback in self._before:
callback.__get__(obj, objtype)(*args, **kwargs)
retval = method(*args, **kwargs)
for callback in self._after:
callback.__get__(obj, objtype)(*args, **kwargs)
return retval
return out
def wrap_method(cls: Type, name: str, *, before: Callable = None, after: Callable = None):
"""Wrap the functionality the instance- or class method ``cls.name`` with additional behaviour
``before`` and ``after``.
This mutates ``cls``, replacing the attribute ``name`` with the new functionality. This is
useful when creating class decorators. The method is allowed to be defined on any parent class
instead.
If either ``before`` or ``after`` are given, they should be callables with a compatible
signature to the method referred to. They will be called immediately before or after the method
as appropriate, and any return value will be ignored.
Args:
cls: the class to modify.
name: the name of the method on the class to wrap.
before: a callable that should be called before the method that is being wrapped.
after: a callable that should be called after the method that is being wrapped.
Raises:
ValueError: if the named method is not defined on the class or any parent class.
"""
# The best time to apply decorators to methods is before they are bound (e.g. by using function
# decorators during the class definition), but if we're making a class decorator, we can't do
# that. We need the actual definition of the method, so we have to dodge the normal output of
    # `type.__getattribute__`, which evaluates descriptors if it finds them, unless the name we're
# looking for is defined on `type` itself. In that case, we need the attribute getter to
# correctly return the underlying object, not the one that `type` defines for its own purposes.
attribute_getter = type.__getattribute__ if name in _TYPE_METHODS else object.__getattribute__
for cls_ in inspect.getmro(cls):
try:
method = attribute_getter(cls_, name)
break
except AttributeError:
pass
else:
raise ValueError(f"Method '{name}' is not defined for class '{cls.__name__}'")
setattr(cls, name, _WrappedMethod(method, before, after))
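if __name__ == "__main__":
    # Minimal usage sketch, not part of Qiskit: ``Counter`` and ``announce`` are
    # hypothetical names used purely to illustrate ``wrap_method``.
    class Counter:
        def __init__(self):
            self.value = 0
        def bump(self, amount):
            self.value += amount
    def announce(self, amount):
        print(f"about to bump by {amount}")
    wrap_method(Counter, "bump", before=announce)
    counter = Counter()
    counter.bump(3)       # prints "about to bump by 3", then runs the original method
    print(counter.value)  # 3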
|
_old/ReplyDict/replydict.py | tigefa4u/reddit | 444 | 11139353 | #/u/GoldenSights
import json
import praw
import random
import sqlite3
import time
import traceback
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
COMMENTHEADER = "This is at the top of the comment"
COMMENTFOOTER = "This is at the bottom of the comment"
#These can be blank if you don't want them.
DICTFILE = 'snakes.txt'
#The file with the Keys/Values
RESULTFORM = "[_key_](_value_)"
#This is the form that the result will take
#You may use _key_ and _value_ to inject the key/value from the dict.
#This preset will create a link where the text is the snake name and the url is the wiki link
#You may delete one or both of these injectors.
KEYAUTHORS = []
# These are the names of the authors you are looking for
# The bot will only reply to authors on this list
# Keep it empty to allow anybody.
MULTIPLE_MATCHES = True
# If True, the comment will respond to multiple keywords within the comment.
# Using snakes.txt as an example, True means that we will link multiple snake URLs if
# the comment contained multiple snake names.
# If False, only keep the first generated response. Because dictionaries are unordered,
# there is no guarantee which one will be picked.
LEVENMODE = True
#If this is True it will use a function that is slow but can find misspelled keys
#If this is False it will use a simple function that is very fast but can only find keys which are spelled exactly
MAXPOSTS = 100
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
'''All done!'''
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
with open(DICTFILE, 'r') as f:
DICT = json.loads(f.read())
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
cur.execute('CREATE INDEX IF NOT EXISTS oldpost_index ON oldposts(id)')
print('Loaded Completed table')
sql.commit()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
if r.has_scope('identity'):
USERNAME = r.user.name.lower()
else:
USERNAME = ''
def levenshtein(s1, s2):
#Levenshtein algorithm to figure out how close two strings are two each other
#Courtesy http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
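#Illustration: levenshtein('kitten', 'sitting') returns 3, so with findsuper's default
#tolerance of 3 that misspelling would still be accepted as a match.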
def findsuper(comment, tolerance= 3):
'''
Look in the comment for any matches in the dict.
This version uses the levenshtein function to look for keys
which are slightly mispelled.
'''
results = []
used = []
for itemname in DICT:
itemlength = len(itemname.split())
pos = 0
commentsplit = comment.split()
#print(commentsplit)
end = False
while not end:
try:
gram = commentsplit[pos:pos+itemlength]
gramjoin = ' '.join(gram)
lev = levenshtein(itemname, gramjoin)
#print(snakename, gramjoin)
#print(lev)
if lev <= tolerance:
if itemname not in used:
used.append(itemname)
result = RESULTFORM
result = result.replace('_key_', itemname)
result = result.replace('_value_', get_response(itemname))
results.append(result)
if MULTIPLE_MATCHES is False:
return results
pos += 1
if pos > len(commentsplit):
end = True
except IndexError:
end = True
return results
def findsimple(comment):
'''
Look in the comment for any matches in the dict.
This version just checks whether the text is in the body.
It's faster than findsuper.
'''
results = []
for itemname in DICT:
if itemname.lower() in comment.lower():
result = RESULTFORM
result = result.replace('_key_', itemname)
result = result.replace('_value_', get_response(itemname))
results.append(result)
if MULTIPLE_MATCHES is False:
return results
return results
def get_response(key):
'''
Return the appropriate response for this keyword.
If the value for this key is a list, we will return one random
element using `random.choice`.
Otherwise just return whatever is there.
Does not protect against KeyErrors.
'''
value = DICT[key]
if isinstance(value, list):
return random.choice(value)
return value
def replydict():
print('Searching '+ SUBREDDIT + '.')
subreddit = r.get_subreddit(SUBREDDIT)
posts = subreddit.get_comments(limit=MAXPOSTS)
for post in posts:
results = []
pid = post.id
try:
pauthor = post.author.name.lower()
except AttributeError:
continue
if KEYAUTHORS != [] and all(keyauthor != pauthor for keyauthor in KEYAUTHORS):
continue
if pauthor == USERNAME:
# Will not reply to self
continue
cur.execute('SELECT * FROM oldposts WHERE ID == ?', [pid])
if cur.fetchone():
# Already in database
continue
cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
sql.commit()
pbody = post.body.lower()
if LEVENMODE is True:
results = findsuper(pbody)
else:
results = findsimple(pbody)
if len(results) == 0:
continue
newcomment = COMMENTHEADER
newcomment += '\n\n' + '\n\n'.join(results) + '\n\n'
newcomment += COMMENTFOOTER
note = 'Replying to {id} by {author} with {count} items'
note = note.format(id=pid, author=pauthor, count=len(results))
print(note)
post.reply(newcomment)
while True:
try:
replydict()
except Exception as e:
traceback.print_exc()
print('Running again in %d seconds \n' % WAIT)
sql.commit()
time.sleep(WAIT)
|
pytorch_nlu/pytorch_sequencelabeling/slPredict.py | dumpmemory/Pytorch-NLU | 115 | 11139388 | <reponame>dumpmemory/Pytorch-NLU
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/25 19:30
# @author : Mo
# @function: predict model, prediction module
# adapt the import path for linux
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "."))
sys.path.append(path_root)
from tcConfig import model_config
os.environ["CUDA_VISIBLE_DEVICES"] = model_config.get("CUDA_VISIBLE_DEVICES", "0")
from slConfig import _SL_MODEL_SOFTMAX, _SL_MODEL_GRID, _SL_MODEL_SPAN, _SL_MODEL_CRF
from slConfig import _SL_DATA_CONLL, _SL_DATA_SPAN
from slTools import get_logger, load_json
from slOffice import Office
from slData import Corpus
from argparse import Namespace
class SequenceLabelingPredict:
    def __init__(self, path_config):
        """ Initialization """
self.load_config(path_config)
self.load_model()
    def load_config(self, path_config):
        """ Load hyperparameters """
config = load_json(path_config)
self.config = Namespace(**config)
self.logger = get_logger(self.config.model_save_path)
self.l2i, self.i2l = self.config.l2i, self.config.i2l
        # data preprocessing class
self.corpus = Corpus(self.config, self.logger)
    def load_model(self):
        """ Load the model """
self.office = Office(config=self.config, logger=self.logger)
self.office.load_model()
    def process(self, texts):
        """ Data preprocessing, process """
        # convert tokens to ids, for train/dev sets
datas_xy, _ = self.corpus.read_texts_from_json(texts, keys=self.config.xy_keys_predict)
if self.config.task_type.upper() in [_SL_MODEL_SPAN]:
sl_preprocess = self.corpus.preprocess_span
elif self.config.task_type.upper() in [_SL_MODEL_GRID]:
sl_preprocess = self.corpus.preprocess_grid
else:
sl_preprocess = self.corpus.preprocess_common
dataset = sl_preprocess(datas_xy, self.config.l2i, l2i_conll=self.config.l2i_conll, sl_ctype=self.config.sl_ctype, max_len=self.config.max_len)
return dataset
    def predict(self, texts):
        """ Predict """
dataset = self.process(texts)
res = self.office.predict(dataset)
return res
if __name__ == "__main__":
path_config = "../output/sequence_labeling/model_ERNIE/sl.config"
tcp = SequenceLabelingPredict(path_config)
texts = [{"text": "平乐县,古称昭州,隶属于广西壮族自治区桂林市,位于广西东北部,桂林市东南部,东临钟山县,南接昭平,西北毗邻阳朔,北连恭城,总面积1919.34平方公里。"},
{"text": "平乐县主要旅游景点有榕津千年古榕、冷水石景苑、仙家温泉、桂江风景区、漓江风景区等,平乐县为漓江分界点,平乐以北称漓江,以南称桂江,是著名的大桂林旅游区之一。"},
{"text": "印岭玲珑,昭水晶莹,环绕我平中。青年的乐园,多士受陶熔。生活自觉自治,学习自发自动。五育并重,手脑并用。迎接新潮流,建设新平中"},
{"text": "桂林山水甲天下, 阳朔山水甲桂林"},
]
res = tcp.predict(texts)
print(res)
while True:
print("请输入:")
question = input()
res = tcp.predict([{"text": question}])
print(res)
|
1]. DSA/1]. Data Structures/06]. Linked List/Python/count_rotations.py | Utqrsh04/The-Complete-FAANG-Preparation | 6,969 | 11139396 | <reponame>Utqrsh04/The-Complete-FAANG-Preparation<filename>1]. DSA/1]. Data Structures/06]. Linked List/Python/count_rotations.py
from insertion import node
def push(head_ref, newdata):
new_node = node(newdata)
new_node.data = newdata
new_node.next = head_ref
head_ref = new_node
return head_ref
def prList(node):
while (node != None):
print(node.data, end = " ")
node = node.next
def countRotation(head):
count = 0
min = head.data
while head:
if head.data < min:
break
count += 1
head = head.next
return count
head = None
head = push(head, 12)
head = push(head, 11)
head = push(head, 8)
head = push(head, 5)
head = push(head, 18)
head = push(head, 15)
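# The list built above is 15 -> 18 -> 5 -> 8 -> 11 -> 12; countRotation counts nodes
# until it meets the first value smaller than the head (5), so it reports 2 rotations here.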
prList(head);
print()
print("Linked list rotated elements: ",
end = '')
print(countRotation(head))
|
alex/utils/interface.py | oplatek/alex | 184 | 11139397 | import inspect
def interface_method(f):
f.abstract = True
return f
class Interface(object):
def __new__(cls, *args, **kwargs):
res = super(Interface, cls).__new__(cls, *args, **kwargs)
missing_methods = []
for method in inspect.getmembers(res, predicate=inspect.ismethod):
if getattr(method[1], 'abstract', False):
missing_methods += [method[0]]
if len(missing_methods) > 0:
raise Exception("Class %s is missing these interface methods: %s" %\
(cls.__name__,", ".join((missing_methods))))
return res
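if __name__ == "__main__":
    # Minimal illustration with hypothetical classes (not part of Alex): a concrete
    # subclass must override every @interface_method, otherwise instantiation fails.
    class Greeter(Interface):
        @interface_method
        def greet(self):
            pass
    class Polite(Greeter):
        def greet(self):
            return "hello"
    print(Polite().greet())  # "hello"
    try:
        Greeter()  # 'greet' is still abstract -> raises Exception
    except Exception as e:
        print(e)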
|
mmf/datasets/builders/clevr/dataset.py | facebookresearch/pythia | 3,252 | 11139404 | <gh_stars>1000+
import json
import os
import numpy as np
import torch
from mmf.common.sample import Sample
from mmf.datasets.base_dataset import BaseDataset
from mmf.utils.distributed import is_main, synchronize
from mmf.utils.general import get_mmf_root
from mmf.utils.text import tokenize, VocabFromText
from PIL import Image
_CONSTANTS = {
"questions_folder": "questions",
"dataset_key": "clevr",
"empty_folder_error": "CLEVR dataset folder is empty.",
"questions_key": "questions",
"question_key": "question",
"answer_key": "answer",
"train_dataset_key": "train",
"images_folder": "images",
"vocabs_folder": "vocabs",
}
_TEMPLATES = {
"data_folder_missing_error": "Data folder {} for CLEVR is not present.",
"question_json_file": "CLEVR_{}_questions.json",
"vocab_file_template": "{}_{}_vocab.txt",
}
class CLEVRDataset(BaseDataset):
"""Dataset for CLEVR. CLEVR is a reasoning task where given an image with some
3D shapes you have to answer basic questions.
Args:
dataset_type (str): type of dataset, train|val|test
config (DictConfig): Configuration Node representing all of the data necessary
to initialize CLEVR dataset class
        data_folder: Root folder in which all of the data will be present; if passed,
            it replaces the default built from data_dir and data_folder in config.
"""
def __init__(self, config, dataset_type, data_folder=None, *args, **kwargs):
super().__init__(_CONSTANTS["dataset_key"], config, dataset_type)
self._data_folder = data_folder
self._data_dir = os.path.join(get_mmf_root(), config.data_dir)
if not self._data_folder:
self._data_folder = os.path.join(self._data_dir, config.data_folder)
if not os.path.exists(self._data_folder):
raise RuntimeError(
_TEMPLATES["data_folder_missing_error"].format(self._data_folder)
)
# Check if the folder was actually extracted in the subfolder
if config.data_folder in os.listdir(self._data_folder):
self._data_folder = os.path.join(self._data_folder, config.data_folder)
if len(os.listdir(self._data_folder)) == 0:
raise FileNotFoundError(_CONSTANTS["empty_folder_error"])
self.load()
def load(self):
self.image_path = os.path.join(
self._data_folder, _CONSTANTS["images_folder"], self._dataset_type
)
with open(
os.path.join(
self._data_folder,
_CONSTANTS["questions_folder"],
_TEMPLATES["question_json_file"].format(self._dataset_type),
)
) as f:
self.questions = json.load(f)[_CONSTANTS["questions_key"]]
        # Vocab should only be built in the main process, as it would be a repetition of the same task
if is_main():
self._build_vocab(self.questions, _CONSTANTS["question_key"])
self._build_vocab(self.questions, _CONSTANTS["answer_key"])
synchronize()
def __len__(self):
return len(self.questions)
def _get_vocab_path(self, attribute):
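        # For example, the "question" attribute of this dataset expands the template to
        # <data_dir>/vocabs/clevr_question_vocab.txt (illustrative expansion).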
return os.path.join(
self._data_dir,
_CONSTANTS["vocabs_folder"],
_TEMPLATES["vocab_file_template"].format(self.dataset_name, attribute),
)
def _build_vocab(self, questions, attribute):
# Vocab should only be built from "train" as val and test are not observed in training
if self._dataset_type != _CONSTANTS["train_dataset_key"]:
return
vocab_file = self._get_vocab_path(attribute)
# Already exists, no need to recreate
if os.path.exists(vocab_file):
return
# Create necessary dirs if not present
os.makedirs(os.path.dirname(vocab_file), exist_ok=True)
sentences = [question[attribute] for question in questions]
build_attributes = self.config.build_attributes
# Regex is default one in tokenize i.e. space
kwargs = {
"min_count": build_attributes.get("min_count", 1),
"keep": build_attributes.get("keep", [";", ","]),
"remove": build_attributes.get("remove", ["?", "."]),
}
if attribute == _CONSTANTS["answer_key"]:
kwargs["only_unk_extra"] = False
vocab = VocabFromText(sentences, **kwargs)
with open(vocab_file, "w") as f:
f.write("\n".join(vocab.word_list))
def __getitem__(self, idx):
data = self.questions[idx]
        # Each call to __getitem__ from the dataloader returns a Sample class object which
        # is collated by our special batch collator into a SampleList, which is basically
        # an attribute-based batch in layman terms
current_sample = Sample()
question = data["question"]
tokens = tokenize(question, keep=[";", ","], remove=["?", "."])
processed = self.text_processor({"tokens": tokens})
current_sample.text = processed["text"]
processed = self.answer_processor({"answers": [data["answer"]]})
current_sample.answers = processed["answers"]
current_sample.targets = processed["answers_scores"]
image_path = os.path.join(self.image_path, data["image_filename"])
image = np.true_divide(Image.open(image_path).convert("RGB"), 255)
image = image.astype(np.float32)
current_sample.image = torch.from_numpy(image.transpose(2, 0, 1))
return current_sample
|
gluoncv/auto/estimators/yolo/utils.py | Kh4L/gluon-cv | 5,447 | 11139423 | """Utils for auto YOLO estimator"""
import os
from mxnet import gluon
from ....data import MixupDetection
from ....data.batchify import Tuple, Stack, Pad
from ....data.dataloader import RandomTransformDataLoader
from ....data.transforms.presets.yolo import YOLO3DefaultTrainTransform
from ....data.transforms.presets.yolo import YOLO3DefaultValTransform
from .... import data as gdata
from ....utils.metrics.voc_detection import VOC07MApMetric
from ....utils.metrics.coco_detection import COCODetectionMetric
def _get_dataset(dataset, args):
if dataset.lower() == 'voc':
train_dataset = gdata.VOCDetection(
splits=[(2007, 'trainval'), (2012, 'trainval')])
val_dataset = gdata.VOCDetection(
splits=[(2007, 'test')])
val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
elif dataset.lower() == 'voc_tiny':
# need to download the dataset and specify the path to store the dataset in
# root = os.path.expanduser('~/.mxnet/datasets/')
# filename_zip = ag.download('https://autogluon.s3.amazonaws.com/datasets/tiny_motorbike.zip', path=root)
# filename = ag.unzip(filename_zip, root=root)
# data_root = os.path.join(root, filename)
train_dataset = gdata.CustomVOCDetectionBase(classes=('motorbike',), root=args.dataset_root + 'tiny_motorbike',
splits=[('', 'trainval')])
val_dataset = gdata.CustomVOCDetectionBase(classes=('motorbike',), root=args.dataset_root + 'tiny_motorbike',
splits=[('', 'test')])
val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
elif dataset.lower() == 'coco':
train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
val_metric = COCODetectionMetric(
val_dataset, os.path.join(args.logdir, args.save_prefix + '_eval'), cleanup=True,
data_shape=(args.yolo3.data_shape, args.yolo3.data_shape))
else:
raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
if args.train.num_samples < 0:
args.train.num_samples = len(train_dataset)
if args.train.mixup:
train_dataset = MixupDetection(train_dataset)
return train_dataset, val_dataset, val_metric
def _get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers, args):
"""Get dataloader."""
width, height = data_shape, data_shape
# stack image, all targets generated
batchify_fn = Tuple(*([Stack() for _ in range(6)] + [Pad(axis=0, pad_val=-1) for _ in range(1)]))
if args.yolo3.no_random_shape:
train_loader = gluon.data.DataLoader(
train_dataset.transform(YOLO3DefaultTrainTransform(width, height, net, mixup=args.train.mixup)),
batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
else:
transform_fns = [YOLO3DefaultTrainTransform(x * 32, x * 32, net, mixup=args.train.mixup) for x in range(10, 20)]
train_loader = RandomTransformDataLoader(
transform_fns, train_dataset, batch_size=batch_size, interval=10, last_batch='rollover',
shuffle=True, batchify_fn=batchify_fn, num_workers=num_workers)
val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
val_loader = gluon.data.DataLoader(
val_dataset.transform(YOLO3DefaultValTransform(width, height)),
batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
train_eval_loader = gluon.data.DataLoader(
train_dataset.transform(YOLO3DefaultValTransform(width, height)),
batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
return train_loader, val_loader, train_eval_loader
def _save_params(net, best_map, current_map, epoch, save_interval, prefix):
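    # With e.g. prefix='yolo3_darknet53_voc', epoch=10 and current_map=0.71 (illustrative
    # values), an improved mAP gets saved as 'yolo3_darknet53_voc_0010_0.7100_best.params'.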
current_map = float(current_map)
if current_map > best_map[0]:
best_map[0] = current_map
net.save_parameters('{:s}_{:04d}_{:.4f}_best.params'.format(prefix, epoch, current_map))
with open(prefix+'_best_map.log', 'a') as log_file:
log_file.write('{:04d}:\t{:.4f}\n'.format(epoch, current_map))
if save_interval and epoch % save_interval == 0:
net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))
|
osf/migrations/0176_pagecounter_data.py | gaybro8777/osf.io | 628 | 11139444 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-12 17:18
from __future__ import unicode_literals
import logging
from django.db import migrations
from osf.management.commands.migrate_pagecounter_data import FORWARD_SQL, REVERSE_SQL
from website.settings import DEBUG_MODE
logger = logging.getLogger(__name__)
class Migration(migrations.Migration):
dependencies = [
('osf', '0175_pagecounter_schema'),
]
if DEBUG_MODE:
operations = [
migrations.RunSQL(FORWARD_SQL, REVERSE_SQL)
]
else:
operations = []
logger.info(
            'The automatic migration only runs in DEBUG_MODE. Use management command migrate_pagecounter_data instead'
)
|
examples/example_sim_grad_cloth.py | NVIDIA/warp | 306 | 11139448 | <gh_stars>100-1000
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Grad Cloth
#
# Shows how to use Warp to optimize the initial velocities of a piece of
# cloth such that it's center of mass hits a target after a specified time.
#
# This example uses the built-in wp.Tape() object to compute gradients of
# the distance to target (loss) w.r.t the initial velocity, followed by
# a simple gradient-descent optimization step.
#
###########################################################################
import os
import math
import numpy as np
import warp as wp
import warp.sim
import warp.sim.render
wp.init()
class Cloth:
# seconds
sim_duration = 2.0
# control frequency
frame_dt = 1.0/60.0
frame_steps = int(sim_duration/frame_dt)
# sim frequency
sim_substeps = 16
sim_steps = frame_steps * sim_substeps
sim_dt = frame_dt / sim_substeps
sim_time = 0.0
render_time = 0.0
train_iters = 64
train_rate = 5.0
def __init__(self, render=True, profile=False, adapter='cpu'):
builder = wp.sim.ModelBuilder()
dim_x = 16
dim_y = 16
builder.add_cloth_grid(pos=(0.0, 0.0, 0.0),
vel=(0.1, 0.1, 0.0),
rot=wp.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.25),
dim_x=dim_x,
dim_y=dim_y,
cell_x=1.0/dim_x,
cell_y=1.0/dim_y,
mass=1.0)
self.device = adapter
self.profile = profile
self.model = builder.finalize(adapter)
self.model.ground = False
self.model.tri_ke = 10000.0
self.model.tri_ka = 10000.0
self.model.tri_kd = 100.0
self.model.tri_kb = 0.0
self.model.tri_lift = 10.0
self.model.tri_drag = 5.0
self.integrator = wp.sim.SemiImplicitIntegrator()
self.target = (8.0, 0.0, 0.0)
self.com = wp.zeros(1, dtype=wp.vec3, device=adapter, requires_grad=True)
self.loss = wp.zeros(1, dtype=wp.float32, device=adapter, requires_grad=True)
# allocate sim states for trajectory
self.states = []
for i in range(self.sim_steps+1):
self.states.append(self.model.state(requires_grad=True))
        if render:
self.stage = wp.sim.render.SimRenderer(self.model, os.path.join(os.path.dirname(__file__), "outputs/example_sim_grad_cloth.usd"))
@wp.kernel
def com_kernel(positions: wp.array(dtype=wp.vec3),
n: int,
com: wp.array(dtype=wp.vec3)):
tid = wp.tid()
# compute center of mass
wp.atomic_add(com, 0, positions[tid]/float(n))
@wp.kernel
def loss_kernel(com: wp.array(dtype=wp.vec3),
target: wp.vec3,
loss: wp.array(dtype=float)):
# sq. distance to target
delta = com[0]-target
loss[0] = wp.dot(delta, delta)
@wp.kernel
def step_kernel(x: wp.array(dtype=wp.vec3),
grad: wp.array(dtype=wp.vec3),
alpha: float):
tid = wp.tid()
# gradient descent step
x[tid] = x[tid] - grad[tid]*alpha
def compute_loss(self):
# run control loop
for i in range(self.sim_steps):
self.states[i].clear_forces()
self.integrator.simulate(self.model,
self.states[i],
self.states[i+1],
self.sim_dt)
# compute loss on final state
self.com.zero_()
wp.launch(self.com_kernel, dim=self.model.particle_count, inputs=[self.states[-1].particle_q, self.model.particle_count, self.com], device=self.device)
wp.launch(self.loss_kernel, dim=1, inputs=[self.com, self.target, self.loss], device=self.device)
return self.loss
def render(self, iter):
# render every 4 iters
if iter % 4 > 0:
return
# draw trajectory
traj_verts = [self.states[0].particle_q.numpy().mean(axis=0)]
for i in range(0, self.sim_steps, self.sim_substeps):
traj_verts.append(self.states[i].particle_q.numpy().mean(axis=0))
self.stage.begin_frame(self.render_time)
self.stage.render(self.states[i])
self.stage.render_box(pos=self.target, rot=wp.quat_identity(), extents=(0.1, 0.1, 0.1), name="target")
self.stage.render_line_strip(vertices=traj_verts, color=wp.render.bourke_color_map(0.0, 269.0, self.loss.numpy()[0]), radius=0.02, name=f"traj_{iter}")
self.stage.end_frame()
self.render_time += self.frame_dt
self.stage.save()
def train(self, mode='gd'):
tape = wp.Tape()
for i in range(self.train_iters):
with wp.ScopedTimer("Forward", active=self.profile):
with tape:
self.compute_loss()
with wp.ScopedTimer("Backward", active=self.profile):
tape.backward(self.loss)
with wp.ScopedTimer("Render", active=self.profile):
self.render(i)
with wp.ScopedTimer("Step", active=self.profile):
x = self.states[0].particle_qd
x_grad = tape.gradients[self.states[0].particle_qd]
print(f"Iter: {i} Loss: {self.loss}")
wp.launch(self.step_kernel, dim=len(x), inputs=[x, x_grad, self.train_rate], device=self.device)
tape.reset()
def train_graph(self, mode='gd'):
wp.capture_begin()
tape = wp.Tape()
with tape:
self.compute_loss()
tape.backward(self.loss)
self.graph = wp.capture_end()
for i in range(self.train_iters):
with wp.ScopedTimer("Replay", active=self.profile):
wp.capture_launch(self.graph)
with wp.ScopedTimer("Render", active=self.profile):
self.render(i)
with wp.ScopedTimer("Step", active=self.profile):
x = self.states[0].particle_qd
print(f"Iter: {i} Loss: {self.loss}")
wp.launch(self.step_kernel, dim=len(x), inputs=[x, x.grad, self.train_rate], device=self.device)
tape.zero()
bounce = Cloth(adapter="cuda", profile=False, render=True)
bounce.train_graph('gd')
|
nhentai/__init__.py | xyzkljl1/nhentai | 673 | 11139458 | __version__ = '0.4.16'
__author__ = 'RicterZ'
__email__ = '<EMAIL>'
|
dbReports/iondb/rundb/data/dmfilestat_utils.py | konradotto/TS | 125 | 11139487 | #!/usr/bin/env python
# Copyright (C) 2014 Ion Torrent Systems, Inc. All Rights Reserved
from __future__ import absolute_import
import os
import errno
from django.db.models import Sum
from iondb.rundb.models import DMFileStat, FileServer, Chip
from iondb.rundb.data import dmactions_types
from iondb.rundb.data import dm_utils
from celery.utils.log import get_task_logger
logger = get_task_logger("data_management")
logid = {"logid": "%s" % ("tasks")}
def _unique_experiment(values):
uvalues = []
pks = []
for v in values:
if v["result__experiment__pk"] not in pks:
pks.append(v["result__experiment__pk"])
uvalues.append(v)
return uvalues
def calculate_diskspace_by_path(dmfilestats, fs_path):
"""
Calculates dmfilestats diskspace on given path.
Assumptions:
BaseCalling Input: Proton fullchip onboard results are located in expDir
BaseCalling Input: Proton fullchip shared for single Experiment, count only once for multiple Results
Intermediate Files: PGM and thumbnail almost all Intermediate files are in report dir
Intermediate Files: Proton fullchip about 2% in expDir, the rest in report
"""
ret = {}
expDir_objs = dmfilestats.filter(
result__experiment__expDir__startswith=fs_path
).order_by("-pk")
reportDir_objs = dmfilestats.filter(
result__reportstorage__dirPath__startswith=fs_path
).order_by("-pk")
for dmtype in dmactions_types.FILESET_TYPES:
# find proton fullchip data
proton_chips = Chip.objects.filter(
instrumentType__in=["proton", "S5"]
).values_list("name", flat=True)
proton_filestats = dmfilestats.filter(
result__experiment__chipType__in=proton_chips
).exclude(result__metaData__contains="thumb")
if dmtype == dmactions_types.SIG:
values = (
expDir_objs.filter(dmfileset__type=dmtype)
.values("result__experiment__pk", "diskspace")
.distinct()
)
ret[dmtype] = sum(
v["diskspace"] for v in _unique_experiment(values) if v["diskspace"]
)
if dmtype == dmactions_types.OUT:
ret[dmtype] = (
reportDir_objs.filter(dmfileset__type=dmtype).aggregate(
sum=Sum("diskspace")
)["sum"]
or 0
)
if dmtype == dmactions_types.BASE:
# Proton fullchip sets are in exp folder
values = (
expDir_objs.filter(dmfileset__type=dmtype, pk__in=proton_filestats)
.values("result__experiment__pk", "diskspace")
.distinct()
)
proton_diskspace = sum(
v["diskspace"] for v in _unique_experiment(values) if v["diskspace"]
)
# PGM and thumbnail Basecalling Input are in report folder
objs = reportDir_objs.filter(
dmfileset__type=dmtype, result__parentResult__isnull=True
).exclude(pk__in=proton_filestats)
diskspace = objs.aggregate(sum=Sum("diskspace"))["sum"] or 0
ret[dmtype] = proton_diskspace + diskspace
if dmtype == dmactions_types.INTR:
# Proton fullchip sets about 2% in expDir, the rest in report folder
values = (
expDir_objs.filter(dmfileset__type=dmtype, pk__in=proton_filestats)
.values("result__experiment__pk", "diskspace")
.distinct()
)
proton_diskspace = sum(
0.02 * v["diskspace"] for v in values if v["diskspace"]
)
values = (
reportDir_objs.filter(dmfileset__type=dmtype, pk__in=proton_filestats)
.values("result__experiment__pk", "diskspace")
.distinct()
)
proton_diskspace += sum(
0.98 * v["diskspace"] for v in values if v["diskspace"]
)
# PGM and thumbnail Intermediate Files are in report folder
objs = reportDir_objs.filter(dmfileset__type=dmtype).exclude(
pk__in=proton_filestats
)
diskspace = objs.aggregate(sum=Sum("diskspace"))["sum"] or 0
ret[dmtype] = proton_diskspace + diskspace
return ret
def get_usefull_stats():
dmfilestats = DMFileStat.objects.filter(action_state__in=["L", "S", "N", "A"])
stats = {}
for fs in FileServer.objects.all():
if os.path.exists(fs.filesPrefix):
stats[fs.filesPrefix] = {}
stats[fs.filesPrefix]["Total"] = calculate_diskspace_by_path(
dmfilestats, fs.filesPrefix
)
# get sets marked Keep
keepers = dmfilestats.filter(preserve_data=True) | dmfilestats.filter(
dmfileset__type=dmactions_types.SIG,
result__experiment__storage_options="KI",
)
stats[fs.filesPrefix]["Keep"] = calculate_diskspace_by_path(
keepers, fs.filesPrefix
)
return stats
def get_keepers_diskspace(fs_path):
""" Returns how much space on fs_path is taken up by data marked Keep """
dmfilestats = DMFileStat.objects.filter(action_state__in=["L", "S", "N", "A"])
keepers = dmfilestats.filter(preserve_data=True) | dmfilestats.filter(
dmfileset__type=dmactions_types.SIG, result__experiment__storage_options="KI"
)
keepers_diskspace = calculate_diskspace_by_path(keepers, fs_path)
return keepers_diskspace
def update_diskspace(dmfilestat, cached=None):
"""Update diskspace field in dmfilestat object"""
try:
# search both results directory and raw data directory
search_dirs = [
dmfilestat.result.get_report_dir(),
dmfilestat.result.experiment.expDir,
]
if not cached:
cached = dm_utils.get_walk_filelist(
search_dirs, list_dir=dmfilestat.result.get_report_dir()
)
total_size = 0
# Create a list of files eligible to process
# exclude onboard_results folder if thumbnail or if fullchip was reanalyzed from signal processing
sigproc_results_dir = os.path.join(
dmfilestat.result.get_report_dir(), "sigproc_results"
)
exclude_onboard_results = dmfilestat.result.isThumbnail or (
"onboard_results" not in os.path.realpath(sigproc_results_dir)
)
for start_dir in search_dirs:
to_process = []
if os.path.isdir(start_dir):
to_process, _ = dm_utils._file_selector(
start_dir,
dmfilestat.dmfileset.include,
dmfilestat.dmfileset.exclude,
[],
exclude_onboard_results,
add_linked_sigproc=True,
cached=cached,
)
# process files in list
for path in to_process[1:]:
try:
# logger.debug("%d %s %s" % (j, 'diskspace', path), extra = logid)
if not os.path.islink(path):
total_size += os.lstat(path)[6]
except Exception as inst:
if inst.errno == errno.ENOENT:
pass
else:
errmsg = "update_diskspace %s" % (inst)
logger.error(errmsg, extra=logid)
diskspace = float(total_size) / (1024 * 1024)
except:
diskspace = None
raise
finally:
dmfilestat.diskspace = diskspace
dmfilestat.save()
return diskspace
def get_dmfilestats_diskspace(dmfilestats):
diskspace_gb = {}
for dmtype in dmactions_types.FILESET_TYPES:
objs = dmfilestats.filter(
dmfileset__type=dmtype, action_state__in=["L", "S", "N"]
)
if dmtype == dmactions_types.SIG:
# count only once per experiment
values = (
objs.filter(dmfileset__type=dmtype)
.values("result__experiment__pk", "diskspace")
.distinct()
)
diskspace = sum(
v["diskspace"] for v in _unique_experiment(values) if v["diskspace"]
)
elif dmtype == dmactions_types.BASE:
# exclude results that were re-analyzed from-basecalling (have parentResult)
diskspace = (
objs.filter(result__parentResult__isnull=True).aggregate(
sum=Sum("diskspace")
)["sum"]
or 0
)
else:
diskspace = objs.aggregate(sum=Sum("diskspace"))["sum"] or 0
diskspace_gb[dmtype] = diskspace / 1024
return diskspace_gb
def dm_category_stats():
dmfilestats = DMFileStat.objects.exclude(result__experiment__expDir="")
diskspace = get_dmfilestats_diskspace(dmfilestats)
stats = []
for dmtype in dmactions_types.FILESET_TYPES:
by_type = dmfilestats.filter(dmfileset__type=dmtype)
keepers = (
by_type.filter(preserve_data=True)
if dmtype != dmactions_types.SIG
else by_type.filter(result__experiment__storage_options="KI")
)
dmtype_stats = {
"Total": by_type.count(),
"Keep": keepers.filter(
action_state__in=["L", "S", "N", "A", "SE", "EG", "E"]
).count(),
"Local": by_type.filter(action_state__in=["L", "S", "N"]).count(),
"Archived": by_type.filter(action_state="AD").count(),
"Deleted": by_type.filter(action_state="DD").count(),
"In_process": by_type.filter(
action_state__in=["AG", "DG", "EG", "SA", "SE", "SD", "IG"]
).count(),
"Error": by_type.filter(action_state="E").count(),
"diskspace": diskspace[dmtype],
}
stats.append((dmtype, dmtype_stats))
return stats
|
src/ros_comm/roswtf/src/roswtf/rules.py | jungleni/ros_code_reading | 742 | 11139504 |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""
Common library for writing rule-style checks for generating warnings
and errors. Use of this style streamlines reporting.
The pattern for rules is simple: a rule provides a function that
implements the rule and a format string. If the function returns a
non-zero value, that value is combined with the format string to
produce an error-reporting string. There are other conveniences as
well. If the rule returns a list or a tuple, that will be transformed
into a human-readable list.
This library is a layer on top of the base L{WtfWarning} and
L{WtfError} representation in roswtf.model.
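
For example, a rule might be declared like this (an illustrative sketch; the
check and the context key used in the format string are hypothetical, not
actual roswtf rules)::

    def ros_root_check(ctx):
        # any non-zero return value marks the check as failed
        return not ctx.ros_root

    ros_root_rule = (ros_root_check, "ROS_ROOT is not set (context: %(ros_root)s)")

    # after evaluating the check against a WtfContext instance:
    # warning_rule(ros_root_rule, ros_root_check(ctx), ctx)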
"""
from roswtf.model import WtfWarning, WtfError
def _check_rule(rule, ret, ctx, ctx_list, level):
if ret:
d = ctx.as_dictionary()
def isstring(s):
"""Small helper version to check an object is a string in
a way that works for both Python 2 and 3
"""
try:
return isinstance(s, basestring)
except NameError:
return isinstance(s, str)
if type(ret) in (tuple, list):
f_msg = rule[1]
ret_str = '\n'.join([" * %s"%r for r in ret])
ctx_list.append(level(f_msg%d + "\n" + ret_str+'\n', f_msg, ret))
elif isstring(ret):
f_msg = rule[1]
ctx_list.append(level(f_msg%d + ret%d, f_msg, ret))
else:
f_msg = rule[1]
ctx_list.append(level(f_msg%d, f_msg, ret))
def warning_rule(rule, ret, ctx):
"""
Check return value of rule and update ctx if rule failed.
@param rule: Rule/message pair.
@type rule: (rule_fn, format_msg)
@param ret: return value of rule. If value is non-zero, rule failed
@param ret: Any
@param ctx: context for which rule failed
@param ctx: L{WtfContext}
"""
_check_rule(rule, ret, ctx, ctx.warnings, WtfWarning)
def error_rule(rule, ret, ctx):
"""
Check return value of rule and update ctx if rule failed.
@param rule: Rule/message pair.
@type rule: (rule_fn, format_msg)
@param ret: return value of rule. If value is non-zero, rule failed
@type ret: Any
@param ctx: context for which rule failed
@type ctx: L{WtfContext}
"""
_check_rule(rule, ret, ctx, ctx.errors, WtfError)
|
robinhood/exceptions.py | mrklees/robinhood-python | 106 | 11139507 |
class NotFound(Exception):
pass
class NotLoggedIn(Exception):
pass
class MfaRequired(Exception):
pass
class BadRequest(Exception):
pass
class TooManyRequests(Exception):
pass
class Forbidden(Exception):
pass
|
DPGAnalysis/Skims/python/RPCNoise_example.py | ckamtsikis/cmssw | 852 | 11139531 | import FWCore.ParameterSet.Config as cms
process = cms.Process("USER")
process.load("Configuration/StandardSequences/Geometry_cff")
process.load("Configuration/StandardSequences/MagneticField_38T_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'CRUZET4_V5P::All'
process.prefer("GlobalTag")
process.load("Configuration/StandardSequences/RawToDigi_Data_cff")
process.load("Configuration/StandardSequences/ReconstructionCosmics_cff")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/00718365-02A6-DD11-86BC-000423D98E54.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/00725EB9-22A6-DD11-8EC1-001617DC1F70.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/00A93B38-26A6-DD11-8676-000423D98F98.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/00EB701D-24A6-DD11-9AA1-001617E30D38.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/00FC7A6B-E3A5-DD11-A4D1-001617DF785A.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/0239F671-DCA5-DD11-9268-000423D98844.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/02450760-F4A5-DD11-B709-000423D6BA18.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/0260B87A-49A6-DD11-9731-000423D992A4.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/02914A0F-29A6-DD11-BD17-000423D985B0.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/02C0EAC4-62A6-DD11-868F-000423D6CA02.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/02CD99AF-1BA6-DD11-B71E-000423D992DC.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/02F1E96F-50A6-DD11-938D-0019DB29C614.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/02FA5CED-56A6-DD11-8F63-001617E30D52.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/044D708C-5EA6-DD11-BFA5-0030487D0D3A.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/0457B4E8-13A6-DD11-816E-000423D98920.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/046760F2-0CA6-DD11-B377-000423D94A20.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/04766D63-5CA6-DD11-B07D-000423D98834.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/047C6309-29A6-DD11-AC8E-000423D6B42C.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/04861900-22A6-DD11-90DA-000423D944F8.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/04B99356-5AA6-DD11-950F-0030487A3C9A.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/06192894-3FA6-DD11-9A2B-000423D990CC.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/062D36C0-3CA6-DD11-BB50-001617C3B69C.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/066D62D5-56A6-DD11-970B-001617C3B6CC.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/0681071B-5DA6-DD11-A9A1-000423D99660.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/0684A267-09A6-DD11-B4FA-000423D98B5C.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/06AB249F-46A6-DD11-96C1-000423D99CEE.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/06BFD55E-1CA6-DD11-AC5A-001D09F2915A.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/08003087-DEA5-DD11-A4EE-000423D98B5C.root',
'/store/data/Commissioning08/Cosmics/RAW/v1/000/068/021/0866B1DE-FEA5-DD11-8F9F-0030487C6062.root'
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3000)
)
#### output module
process.load("Configuration.EventContent.EventContentCosmics_cff")
process.FEVT = cms.OutputModule("PoolOutputModule",
process.FEVTEventContent,
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RAW')),
fileName = cms.untracked.string("RPCNoise_test.root"),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('noiseEvents')
)
)
process.FEVT.outputCommands.append('keep *_*_*_*')
process.FEVT.outputCommands.append('keep FEDRawDataCollection_*_*_*')
process.FEVT.outputCommands.append('keep *_muonCSCDigis_*_*')
process.FEVT.outputCommands.append('keep *_muonDTDigis_*_*')
process.FEVT.outputCommands.append('keep *_muonRPCDigis_*_*')
process.FEVT.outputCommands.append('keep *_rpcRecHits_*_*')
#============================================================
# the filter
#============================================================
process.check = cms.EDFilter(
"RPCNoise",
fillHistograms = cms.untracked.bool(True),
histogramFileName = cms.untracked.string('histos_test.root'),
nRPCHitsCut = cms.untracked.int32(40),
nCSCWiresCut = cms.untracked.int32(10),
nCSCStripsCut = cms.untracked.int32(50),
nDTDigisCut = cms.untracked.int32(40)
)
#process.noiseEvents = cms.Path(process.muonCSCDigis *
# process.muonRPCDigis *
# process.rpcRecHits *
# process.check)
process.muondigis = cms.Sequence(process.csctfDigis+process.dttfDigis+process.gctDigis+process.gtDigis+
process.gtEvmDigis+
process.muonCSCDigis+process.muonDTDigis+process.muonRPCDigis)
process.noiseEvents = cms.Path(process.muondigis *
process.muonlocalreco *
process.check)
process.outpath = cms.EndPath(process.FEVT)
|
problems/euler/47/euler_math.py | vidyadeepa/the-coding-interview | 1,571 | 11139566 | class Primes(object):
"""
    A dynamic (growing) Sieve of Eratosthenes
"""
def __init__(self, sieve_size = 2**22):
self.primes = set()
self.multiples = set()
self.sieve_size = sieve_size # Initial sieve size
self.curr = 2 # Start number to search for primes
def all(self):
return self.primes
def __iter__(self):
return self
def upto(self, n):
"""
Returns all primes up to the number n
"""
for p in sorted(self.primes):
if p > n:
return
yield p
p = self.next()
while p <= n:
yield p
p = self.next()
def next(self):
# Remove all even numbers first
if self.curr == 2:
self.primes.add(2)
self.update_multiples(2)
self.curr = 3
return 2
if self.curr >= self.sieve_size - 1:
# Finished old sieve. Grow sieve.
self.grow_sieve()
# Skip to next prime
while self.curr in self.multiples:
self.curr += 2
# Found a prime!
prime = self.curr
self.primes.add(prime)
self.update_multiples(prime)
self.curr = prime + 2
return prime
def grow_sieve(self):
self.sieve_size *= 2
for p in self.primes:
self.update_multiples(p)
def update_multiples(self, p):
self.multiples.update(range(p*2, self.sieve_size, p))
def primes_upto(n):
"""
    Simple Sieve of Eratosthenes
"""
multiples = set()
yield 2
# Optimization: Add the even numbers
multiples.update(range(4, n, 2))
for p in range(3,n,2):
if p not in multiples:
# Found a prime!
yield p
multiples.update(range(p*2, n, p))
def is_prime(n):
"""Returns True if n is prime."""
if n == 2:
return True
if n == 3:
return True
if n % 2 == 0:
return False
if n % 3 == 0:
return False
i = 5
w = 2
while i * i <= n:
if n % i == 0:
return False
i += w
w = 6 - w
return True
def factorize(n, primes):
factors = []
for p in primes:
if p > n:
break
i = 0
while n % p == 0:
n //= p
i += 1
if i > 0:
factors.append((p,i))
if n > 1: factors.append((n,1))
return factors
def divisors(z, primes):
n = z
div = [1]
factors = factorize(n, primes)
for p,i in factors:
div = [d * p**e for d in div for e in range(i+1) if d* p**e != n]
return div
def is_pandigital(n):
digits = str(n)
unique_digits = set(digits)
length = len(digits)
return length == len(unique_digits) and str(length) in unique_digits and str(0) not in unique_digits
def _try_composite(a, d, n, s):
if pow(a, d, n) == 1:
return False
for i in range(s):
if pow(a, 2**i * d, n) == n-1:
return False
return True # n is definitely composite
def is_probably_prime(n):
d, s = n - 1, 0
while not d % 2:
d, s = d >> 1, s + 1
    # Deterministic (exact) for n below these bounds, per http://primes.utm.edu/prove/prove2_3.html
if n < 1373653:
return not any(_try_composite(a, d, n, s) for a in (2, 3))
if n < 25326001:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5))
if n < 118670087467:
if n == 3215031751:
return False
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7))
if n < 2152302898747:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11))
if n < 3474749660383:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13))
if n < 341550071728321:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13, 17))
else:
# More sophisticated check would be required
return False
|
src/track_3d.py | noskill/JRMOT_ROS | 112 | 11139583 |
# vim: expandtab:ts=4:sw=4
import numpy as np
import pdb
import torch
class TrackState:
"""
Enumeration type for the single target track state. Newly created tracks are
classified as `tentative` until enough evidence has been collected. Then,
the track state is changed to `confirmed`. Tracks that are no longer alive
are classified as `deleted` to mark them for removal from the set of active
tracks.
"""
Tentative = 1
Confirmed = 2
Deleted = 3
class Track_3d:
"""
A single target track with state space `(x, y, a, h)` and associated
velocities, where `(x, y)` is the center of the bounding box, `a` is the
aspect ratio and `h` is the height.
Parameters
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
track_id : int
A unique track identifier.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
max_age : int
The maximum number of consecutive misses before the track state is
set to `Deleted`.
feature : Optional[ndarray]
Feature vector of the detection this track originates from. If not None,
this feature is added to the `features` cache.
Attributes
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
track_id : int
A unique track identifier.
hits : int
Total number of measurement updates.
age : int
        Total number of frames since first occurrence.
time_since_update : int
Total number of frames since last measurement update.
state : TrackState
The current track state.
features : List[ndarray]
A cache of features. On each measurement update, the associated feature
vector is added to this list.
"""
def __init__(self, mean, covariance, track_id, n_init, max_age,
feature=None, appearance_feature = None, cuda = False, lstm = None):
self.mean = mean
self.covariance = covariance
self.track_id = track_id
self.hits = 1
self.age = 1
self.time_since_update = 0
self.tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.cuda = cuda
self.state = TrackState.Tentative
self.features = []
self.features_2d = []
self.hidden = None
if lstm is None:
self.features.append(feature)
self.features_2d.append(appearance_feature)
else:
self.feature_update(feature, appearance_feature, lstm)
self.first_detection = mean[:7]
self._n_init = n_init
if self.state == TrackState.Tentative and self.hits >= self._n_init:
self.state = TrackState.Confirmed
self._max_age = max_age
self.matched = True
self.exiting = False
self.last_box = None
def to_tlwh3d(self):
"""Get current position in bounding box format `(box center of bottom face [x, y, z], l, w, h)`.
Returns
-------
ndarray
The bounding box.
"""
if self.last_box is not None:
return self.last_box.box_3d
else:
return self.mean[[0,1,2,3,4,5,6]].copy()
def to_tlwh(self, kf):
"""Get current position in bounding box format `(box center of bottom face [x, y, z], l, w, h)`.
Returns
-------
ndarray
The bounding box.
"""
corner_points, _ = kf.calculate_corners(self.mean)
min_x, min_y = np.amin(corner_points, axis = 0)[:2]
max_x, max_y = np.amax(corner_points, axis = 0)[:2]
ret = np.array([min_x, min_y, max_x - min_x, max_y - min_y])
return ret
def predict(self, kf):
"""Propagate the state distribution to the current time step using a
Kalman filter prediction step.
Parameters
----------
kf : kalman_filter.KalmanFilter
The Kalman filter.
"""
self.mean, self.covariance = kf.predict(self.mean, self.covariance)
self.age += 1
self.time_since_update += 1
# @profile
def update(self, kf, detection, compare_2d=False,
marginalization=None, detection_idx=None, JPDA=False, lstm = None):
"""Perform Kalman filter measurement update step and update the feature
cache.
Parameters
----------
kf : kalman_filter.KalmanFilter
The Kalman filter.
detection : Detection
The associated detection.
"""
if JPDA:
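            # JPDA: fuse all gated detections in a single Kalman update, weighting each
            # candidate by its marginal association probability (marginalization[0] is
            # the "no detection" hypothesis).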
detections_2d = [det.tlwh for det in detection]
if compare_2d:
detections_3d = None
else:
detections_3d = [np.copy(det.box_3d) for det in detection]
for det in detections_3d:
if det[6] - self.mean[6] > np.pi:
det[6] -= 2 * np.pi
elif det[6] - self.mean[6] < -np.pi:
det[6] += 2*np.pi
self.mean, self.covariance, self.mean_post_3d = kf.update(
self.mean, self.covariance, detections_2d, detections_3d, marginalization, JPDA)
self.mean[6] = self.mean[6] % (2 * np.pi)
self.feature_update(detection, detection_idx, lstm)
if np.argmax(marginalization) != 0:
self.matched=True
else:
self.matched=False
if detection_idx < 0:
self.last_box = None
return
self.hits += 1
self.time_since_update = 0
detection = detection[detection_idx]
self.last_box = detection
else:
detection = detection[detection_idx]
detections_3d = detections_3d[detection_idx]
self.mean, self.covariance = kf.update(
self.mean, self.covariance, detection.tlwh, detections_3d)
if self.state == TrackState.Tentative and self.hits >= self._n_init:
self.state = TrackState.Confirmed
def mark_missed(self):
"""Mark this track as missed (no association at the current time step).
"""
if self.state == TrackState.Tentative:
self.state = TrackState.Deleted
elif self.time_since_update > self._max_age:
self.state = TrackState.Deleted
def is_tentative(self):
"""Returns True if this track is tentative (unconfirmed).
"""
return self.state == TrackState.Tentative
def is_confirmed(self):
"""Returns True if this track is confirmed."""
return self.state == TrackState.Confirmed
def is_deleted(self):
"""Returns True if this track is dead and should be deleted."""
return self.state == TrackState.Deleted
def feature_update(self, detections, detection_idx, lstm, JPDA=False, marginalization=None):
if JPDA:
features=[d.feature for d in detections]
appearance_features=[d.appearance_feature for d in detections]
if len([i for i in features if i is None])==0:
combined_feature=np.sum(np.array(features).reshape(len(features), -1)
*marginalization[1:].reshape(-1, 1), axis=0).astype(np.float32)
self.features.append(combined_feature)
if len([i for i in appearance_features if i is None])==0:
combined_feature=np.sum(
np.array(appearance_features).reshape(len(appearance_features), -1)
*marginalization[1:].reshape(-1, 1), axis=0).astype(np.float32)
self.features_2d.append(combined_feature)
else:
feature = detections[detection_idx].feature
appearance_feature = detections[detection_idx].appearance_feature
if feature is not None:
if lstm is not None:
input_feature = torch.Tensor(feature).type(self.tensor)
input_feature = input_feature.unsqueeze(0)
with torch.no_grad():
if self.hidden is None:
output_feature, self.hidden = lstm(input_feature)
else:
output_feature, self.hidden = lstm(input_feature, self.hidden)
output_feature = output_feature.cpu().numpy().squeeze(0)
else:
output_feature = feature
self.features.append(output_feature)
if appearance_feature is not None:
self.features_2d.append(appearance_feature)
def get_cov(self):
xyz_cov = self.covariance[:3, :3]
theta_cov_1 = self.covariance[7, :3]
theta_cov_2 = self.covariance[7, 7]
out_cov = np.zeros((6, 6))
out_cov[:3,:3] = xyz_cov
out_cov[5, :3] = theta_cov_1
out_cov[:3, 5] = theta_cov_1
out_cov[5, 5] = theta_cov_2
return out_cov |
PyObjCTest/test_nsobjcruntime.py | Khan/pyobjc-framework-Cocoa | 132 | 11139584 | from PyObjCTools.TestSupport import *
import sys
from Foundation import *
try:
unicode
except NameError:
unicode = str
try:
long
except NameError:
long = int
class TestNSObjCRuntime (TestCase):
def testConstants(self):
self.assertEqual(NSFoundationVersionNumber10_0, 397.40)
self.assertEqual(NSFoundationVersionNumber10_1, 425.00)
self.assertEqual(NSFoundationVersionNumber10_1_1, 425.00)
self.assertEqual(NSFoundationVersionNumber10_1_2, 425.00)
self.assertEqual(NSFoundationVersionNumber10_1_3, 425.00)
self.assertEqual(NSFoundationVersionNumber10_1_4, 425.00)
self.assertEqual(NSFoundationVersionNumber10_2, 462.00)
self.assertEqual(NSFoundationVersionNumber10_2_1, 462.00)
self.assertEqual(NSFoundationVersionNumber10_2_2, 462.00)
self.assertEqual(NSFoundationVersionNumber10_2_3, 462.00)
self.assertEqual(NSFoundationVersionNumber10_2_4, 462.00)
self.assertEqual(NSFoundationVersionNumber10_2_5, 462.00)
self.assertEqual(NSFoundationVersionNumber10_2_6, 462.00)
self.assertEqual(NSFoundationVersionNumber10_2_7, 462.70)
self.assertEqual(NSFoundationVersionNumber10_2_8, 462.70)
self.assertEqual(NSFoundationVersionNumber10_3, 500.00)
self.assertEqual(NSFoundationVersionNumber10_3_1, 500.00)
self.assertEqual(NSFoundationVersionNumber10_3_2, 500.30)
self.assertEqual(NSFoundationVersionNumber10_3_3, 500.54)
self.assertEqual(NSFoundationVersionNumber10_3_4, 500.56)
self.assertEqual(NSFoundationVersionNumber10_3_5, 500.56)
self.assertEqual(NSFoundationVersionNumber10_3_6, 500.56)
self.assertEqual(NSFoundationVersionNumber10_3_7, 500.56)
self.assertEqual(NSFoundationVersionNumber10_3_8, 500.56)
self.assertEqual(NSFoundationVersionNumber10_3_9, 500.58)
self.assertEqual(NSFoundationVersionNumber10_4, 567.00)
self.assertEqual(NSFoundationVersionNumber10_4_1, 567.00)
self.assertEqual(NSFoundationVersionNumber10_4_2, 567.12)
self.assertEqual(NSFoundationVersionNumber10_4_3, 567.21)
self.assertEqual(NSFoundationVersionNumber10_4_4_Intel, 567.23)
self.assertEqual(NSFoundationVersionNumber10_4_4_PowerPC, 567.21)
self.assertEqual(NSFoundationVersionNumber10_4_5, 567.25)
self.assertEqual(NSFoundationVersionNumber10_4_6, 567.26)
self.assertEqual(NSFoundationVersionNumber10_4_7, 567.27)
self.assertEqual(NSFoundationVersionNumber10_4_8, 567.28)
self.assertEqual(NSFoundationVersionNumber10_4_9, 567.29)
self.assertEqual(NSFoundationVersionNumber10_4_10, 567.29)
self.assertEqual(NSFoundationVersionNumber10_4_11, 567.36)
self.assertEqual( NSFoundationVersionNumber10_5, 677.00)
self.assertEqual( NSFoundationVersionNumber10_5_1, 677.10)
self.assertEqual( NSFoundationVersionNumber10_5_2, 677.15)
self.assertEqual( NSFoundationVersionNumber10_5_3, 677.19)
self.assertEqual( NSFoundationVersionNumber10_5_4, 677.19)
self.assertEqual( NSFoundationVersionNumber10_5_5, 677.21)
self.assertEqual( NSFoundationVersionNumber10_5_6, 677.22)
self.assertEqual( NSFoundationVersionNumber10_5_7, 677.24)
self.assertEqual( NSFoundationVersionNumber10_5_8, 677.26)
self.assertEqual( NSFoundationVersionNumber10_6, 751.00)
self.assertEqual( NSFoundationVersionNumber10_6_1, 751.00)
self.assertEqual( NSFoundationVersionNumber10_6_2, 751.14)
self.assertEqual( NSFoundationVersionNumber10_6_3, 751.21)
self.assertEqual( NSFoundationVersionNumber10_6_4, 751.29)
self.assertEqual( NSFoundationVersionNumber10_6_5, 751.42)
self.assertIsInstance(NSFoundationVersionNumber, float)
self.assertIsInstance(NSIntegerMax, (int, long))
self.assertIsInstance(NSIntegerMin, (int, long))
self.assertIsInstance(NSUIntegerMax, (int, long))
if sys.maxsize > 2 ** 32:
self.assertEqual(NSIntegerMax, 2 ** 63 -1)
self.assertEqual(NSIntegerMin, -(2 ** 63))
self.assertEqual(NSUIntegerMax, 2**64-1)
else:
self.assertEqual(NSIntegerMax, 2 ** 31 -1)
self.assertEqual(NSIntegerMin, -(2 ** 31))
self.assertEqual(NSUIntegerMax, 2**32-1)
self.assertTrue(YES)
self.assertFalse(NO)
self.assertEqual(NSOrderedAscending, -1)
self.assertEqual(NSOrderedSame, 0)
self.assertEqual(NSOrderedDescending, 1)
self.assertEqual(NSNotFound, NSIntegerMax)
@min_os_level('10.6')
def testConstants10_6(self):
self.assertEqual(NSFoundationVersionNumber10_5, 677.00)
self.assertEqual(NSFoundationVersionNumber10_5_1, 677.10)
self.assertEqual(NSFoundationVersionNumber10_5_2, 677.15)
self.assertEqual(NSFoundationVersionNumber10_5_3, 677.19)
self.assertEqual(NSFoundationVersionNumber10_5_4, 677.19)
self.assertEqual(NSFoundationVersionNumber10_5_5, 677.21)
self.assertEqual(NSFoundationVersionNumber10_5_6, 677.22)
self.assertEqual(NSEnumerationConcurrent, 1<<0)
self.assertEqual(NSEnumerationReverse, 1<<1)
self.assertEqual(NSSortConcurrent, 1<<0)
self.assertEqual(NSSortStable, 1<<4)
@min_os_level('10.7')
def testConstants10_7(self):
self.assertEqual(NSFoundationVersionNumber10_7, 833.10)
self.assertEqual(NSFoundationVersionNumber10_7_1, 833.10)
self.assertEqual(NSFoundationVersionNumber10_7_2, 833.20)
self.assertEqual(NSFoundationVersionNumber10_7_3, 833.24)
self.assertEqual(NSFoundationVersionNumber10_7_4, 833.25)
def testSelectorAccess(self):
v = NSStringFromSelector('description')
self.assertIsInstance(v, unicode)
self.assertEqual(v, 'description')
v = NSSelectorFromString(b"description".decode('ascii'))
self.assertIsInstance(v, str)
self.assertEqual(v, 'description')
def testClassAccess(self):
v = NSStringFromClass(NSObject)
self.assertIsInstance(v, unicode)
self.assertEqual(v, 'NSObject')
v = NSClassFromString('NSDictionary')
self.assertIsInstance(v, objc.objc_class)
self.assertEqual(v, NSDictionary)
def testProtocolAccess(self):
p = NSProtocolFromString('NSObject')
self.assertIsInstance(p, objc.formal_protocol)
v = NSStringFromProtocol(p)
self.assertIsInstance(v, unicode)
self.assertEqual(v, 'NSObject')
def testTypeInfo(self):
rest, size, align = NSGetSizeAndAlignment(b"ii", None, None)
self.assertEqual(rest, b"i")
self.assertIsInstance(size, (int, long))
self.assertIsInstance(align, (int, long))
def testMinMax(self):
self.assertEqual(MAX(1, 2), 2)
self.assertEqual(MAX("a", "b"), "b")
self.assertEqual(MIN(1, 2), 1)
self.assertEqual(MIN("a", "b"), "a")
self.assertEqual(ABS(-1), 1)
self.assertEqual(ABS(-1.0), 1.0)
def testFunctions(self):
self.assertArgIsPrintf(NSLog, 0)
if __name__ == "__main__":
main()
|
content/test/gpu/run_unittests.py | iridium-browser/iridium-browser | 575 | 11139591 |
#!/usr/bin/env vpython
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs unit tests of the code in the gpu_tests/ directory.
This script DOES NOT run the GPU tests themselves; run_gpu_test does that.
"""
import sys
from gpu_tests import path_util
path_util.SetupTelemetryPaths()
import gpu_project_config
from telemetry.testing import unittest_runner
def main():
args = sys.argv[1:]
return unittest_runner.Run(gpu_project_config.CONFIG,
no_browser=True,
passed_args=args)
if __name__ == '__main__':
sys.exit(main())
|
fuzzinator/reduce/reducer.py | akosthekiss/fuzzinator | 202 | 11139599 |
# Copyright (c) 2021 <NAME>, <NAME>.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
class Reducer(object):
"""
Abstract base class to represent test case reducers.
"""
def __call__(self, *, sut_call, issue, on_job_progressed):
"""
Reduce the test case of ``issue`` while ensuring that the reduced test
case still triggers the original issue. Return a tuple consisting of a
reduced test case for the issue (or ``None`` if the issue's current test
case could not be reduced) and a (potentially empty) list of new issues
that were discovered during test case reduction (if any).
Raises :exc:`NotImplementedError` by default.
:param Call sut_call: The SUT call that reported the original issue.
:param dict[str, Any] issue: The original issue, the test case of which
should be reduced.
:param on_job_progressed: A bound
:meth:`~fuzzinator.listener.EventListener.on_job_progressed` method
with frozen ``job_id`` argument that may be notified about the
progress of the reduction.
:return: The reduced test case (if reduction was successful) and the
list of new issues detected during reduction (if any).
:rtype: tuple[Any or None, list[dict[str, Any]]]
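
        A trivial subclass might look like this (an illustrative sketch, not a
        reducer shipped with Fuzzinator)::

            class NullReducer(Reducer):
                def __call__(self, *, sut_call, issue, on_job_progressed):
                    # give up immediately: no reduced test case, no new issues
                    return None, []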
"""
raise NotImplementedError()
|
tests/slashless/api/resources.py | pavanv/django-tastypie | 1,570 | 11139621 | from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource
from tastypie.authorization import Authorization
from basic.models import Note
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
authorization = Authorization()
class NoteResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
resource_name = 'notes'
queryset = Note.objects.all()
authorization = Authorization()
|
libs/core/loss.py | zj1008/GALD-DGCNet | 127 | 11139634 | # CE-loss
import torch.nn as nn
import torch
import torch.nn.functional as F
class OhemCrossEntropy2dTensor(nn.Module):
def __init__(self, ignore_label, reduction='elementwise_mean', thresh=0.6, min_kept=256,
down_ratio=1, use_weight=False):
super(OhemCrossEntropy2dTensor, self).__init__()
self.ignore_label = ignore_label
self.thresh = float(thresh)
self.min_kept = int(min_kept)
self.down_ratio = down_ratio
if use_weight:
weight = torch.FloatTensor(
[0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489,
0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,
1.0865, 1.1529, 1.0507])
self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,
weight=weight,
ignore_index=ignore_label)
else:
self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,
ignore_index=ignore_label)
def forward(self, pred, target):
b, c, h, w = pred.size()
target = target.view(-1)
valid_mask = target.ne(self.ignore_label)
target = target * valid_mask.long()
num_valid = valid_mask.sum()
prob = F.softmax(pred, dim=1)
prob = (prob.transpose(0, 1)).reshape(c, -1)
if self.min_kept > num_valid:
print('Labels: {}'.format(num_valid))
elif num_valid > 0:
prob = prob.masked_fill_(~valid_mask, 1)
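            # OHEM: rank pixels by the predicted probability of their ground-truth
            # class and keep at least `min_kept` of the hardest (lowest-probability)
            # ones, raising the threshold if necessary.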
mask_prob = prob[
target, torch.arange(len(target), dtype=torch.long)]
threshold = self.thresh
if self.min_kept > 0:
_, index = mask_prob.sort()
threshold_index = index[min(len(index), self.min_kept) - 1]
if mask_prob[threshold_index] > self.thresh:
threshold = mask_prob[threshold_index]
kept_mask = mask_prob.le(threshold)
target = target * kept_mask.long()
valid_mask = valid_mask * kept_mask
target = target.masked_fill_(~valid_mask, self.ignore_label)
target = target.view(b, h, w)
return self.criterion(pred, target)
class CriterionDSN(nn.CrossEntropyLoss):
def __init__(self, ignore_index=255,reduce=True):
super(CriterionDSN, self).__init__()
self.ignore_index = ignore_index
self.reduce = reduce
def forward(self, preds, target):
scale_pred = preds[0]
loss1 = super(CriterionDSN, self).forward(scale_pred, target)
scale_pred = preds[1]
loss2 = super(CriterionDSN, self).forward(scale_pred, target)
return loss1 + loss2 * 0.4
class CriterionOhemDSN(nn.Module):
'''
    DSN: we need to consider two supervision signals (main and auxiliary) for the model.
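
    Usage sketch (shapes are illustrative)::

        criterion = CriterionOhemDSN(ignore_index=255)
        # preds = [main_logits, aux_logits], each of shape (B, C, h, w)
        # target: LongTensor of shape (B, H, W)
        loss = criterion(preds, target)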
'''
def __init__(self, ignore_index=255, thresh=0.7, min_kept=100000, reduce=True):
super(CriterionOhemDSN, self).__init__()
self.ignore_index = ignore_index
self.criterion1 = OhemCrossEntropy2dTensor(ignore_index, thresh=thresh, min_kept=min_kept)
self.criterion2 = torch.nn.CrossEntropyLoss(ignore_index=ignore_index, reduce=reduce)
if not reduce:
print("disabled the reduce.")
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
scale_pred = F.upsample(input=preds[0], size=(h, w), mode='bilinear', align_corners=True)
loss1 = self.criterion1(scale_pred, target)
scale_pred = F.upsample(input=preds[1], size=(h, w), mode='bilinear', align_corners=True)
loss2 = self.criterion2(scale_pred, target)
return loss1 + loss2 * 0.4 |
tests/package/test_meta.py | franzbischoff/platformio-core | 4,744 | 11139637 |
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import jsondiff
import semantic_version
from platformio.package.meta import (
PackageMetaData,
PackageOutdatedResult,
PackageSpec,
PackageType,
)
def test_outdated_result():
result = PackageOutdatedResult(current="1.2.3", latest="2.0.0")
assert result.is_outdated()
assert result.is_outdated(allow_incompatible=True)
result = PackageOutdatedResult(current="1.2.3", latest="2.0.0", wanted="1.5.4")
assert result.is_outdated()
assert result.is_outdated(allow_incompatible=True)
result = PackageOutdatedResult(current="1.2.3", latest="2.0.0", wanted="1.2.3")
assert not result.is_outdated()
assert result.is_outdated(allow_incompatible=True)
result = PackageOutdatedResult(current="1.2.3", latest="2.0.0", detached=True)
assert not result.is_outdated()
assert not result.is_outdated(allow_incompatible=True)
def test_spec_owner():
assert PackageSpec("alice/foo library") == PackageSpec(
owner="alice", name="foo library"
)
spec = PackageSpec(" Bob / BarUpper ")
assert spec != PackageSpec(owner="BOB", name="BARUPPER")
assert spec.owner == "Bob"
assert spec.name == "BarUpper"
def test_spec_id():
assert PackageSpec(13) == PackageSpec(id=13)
assert PackageSpec("20") == PackageSpec(id=20)
spec = PackageSpec("id=199")
assert spec == PackageSpec(id=199)
assert isinstance(spec.id, int)
def test_spec_name():
assert PackageSpec("foo") == PackageSpec(name="foo")
assert PackageSpec(" bar-24 ") == PackageSpec(name="bar-24")
def test_spec_requirements():
assert PackageSpec("[email protected]") == PackageSpec(name="foo", requirements="1.2.3")
assert PackageSpec(
name="foo", requirements=semantic_version.Version("1.2.3")
) == PackageSpec(name="foo", requirements="1.2.3")
assert PackageSpec("bar @ ^1.2.3") == PackageSpec(name="bar", requirements="^1.2.3")
assert PackageSpec("13 @ ~2.0") == PackageSpec(id=13, requirements="~2.0")
assert PackageSpec(
name="hello", requirements=semantic_version.SimpleSpec("~1.2.3")
) == PackageSpec(name="hello", requirements="~1.2.3")
spec = PackageSpec("id=20 @ !=1.2.3,<2.0")
assert not spec.external
assert isinstance(spec.requirements, semantic_version.SimpleSpec)
assert semantic_version.Version("1.3.0-beta.1") in spec.requirements
assert spec == PackageSpec(id=20, requirements="!=1.2.3,<2.0")
def test_spec_local_urls(tmpdir_factory):
assert PackageSpec("file:///tmp/foo.tar.gz") == PackageSpec(
url="file:///tmp/foo.tar.gz", name="foo"
)
assert PackageSpec("customName=file:///tmp/bar.zip") == PackageSpec(
url="file:///tmp/bar.zip", name="customName"
)
assert PackageSpec("file:///tmp/some-lib/") == PackageSpec(
url="file:///tmp/some-lib/", name="some-lib"
)
# detached package
assert PackageSpec("file:///tmp/some-lib@src-67e1043a673d2") == PackageSpec(
url="file:///tmp/some-lib@src-67e1043a673d2", name="some-lib"
)
# detached folder without scheme
pkg_dir = tmpdir_factory.mktemp("storage").join("[email protected]").mkdir()
assert PackageSpec(str(pkg_dir)) == PackageSpec(
name="detached", url="file://%s" % pkg_dir
)
def test_spec_external_urls():
assert PackageSpec(
"https://github.com/platformio/platformio-core/archive/develop.zip"
) == PackageSpec(
url="https://github.com/platformio/platformio-core/archive/develop.zip",
name="platformio-core",
)
assert PackageSpec(
"https://github.com/platformio/platformio-core/archive/develop.zip?param=value"
" @ !=2"
) == PackageSpec(
url="https://github.com/platformio/platformio-core/archive/"
"develop.zip?param=value",
name="platformio-core",
requirements="!=2",
)
spec = PackageSpec(
"Custom-Name="
"https://github.com/platformio/platformio-core/archive/[email protected]"
)
assert spec.external
assert spec.has_custom_name()
assert spec.name == "Custom-Name"
assert spec == PackageSpec(
url="https://github.com/platformio/platformio-core/archive/develop.tar.gz",
name="Custom-Name",
requirements="4.4.0",
)
def test_spec_vcs_urls():
assert PackageSpec("https://github.com/platformio/platformio-core") == PackageSpec(
name="platformio-core", url="git+https://github.com/platformio/platformio-core"
)
assert PackageSpec("https://gitlab.com/username/reponame") == PackageSpec(
name="reponame", url="git+https://gitlab.com/username/reponame"
)
assert PackageSpec(
"wolfSSL=https://os.mbed.com/users/wolfSSL/code/wolfSSL/"
) == PackageSpec(
name="wolfSSL", url="hg+https://os.mbed.com/users/wolfSSL/code/wolfSSL/"
)
assert PackageSpec(
"https://github.com/platformio/platformio-core.git#master"
) == PackageSpec(
name="platformio-core",
url="git+https://github.com/platformio/platformio-core.git#master",
)
assert PackageSpec(
"core=git+ssh://github.com/platformio/platformio-core.git#[email protected]"
) == PackageSpec(
name="core",
url="git+ssh://github.com/platformio/platformio-core.git#v4.4.0",
requirements="4.4.0",
)
assert PackageSpec(
"<EMAIL>:platformio/platformio-core.git"
) == PackageSpec(
name="platformio-core",
url="git+<EMAIL>:platformio/platformio-core.git",
)
assert PackageSpec(
"[email protected]:platformio/platformio-core.git @ ^1.2.3,!=5"
) == PackageSpec(
name="pkg",
url="git+<EMAIL>@<EMAIL>:platformio/platformio-core.git",
requirements="^1.2.3,!=5",
)
assert PackageSpec(
owner="platformio",
name="external-repo",
requirements="https://github.com/platformio/platformio-core",
) == PackageSpec(
owner="platformio",
name="external-repo",
url="git+https://github.com/platformio/platformio-core",
)
def test_spec_as_dict():
assert not jsondiff.diff(
PackageSpec("bob/[email protected]").as_dict(),
{
"owner": "bob",
"id": None,
"name": "foo",
"requirements": "1.2.3",
"url": None,
},
)
assert not jsondiff.diff(
PackageSpec(
"https://github.com/platformio/platformio-core/archive/develop.zip?param=value"
" @ !=2"
).as_dict(),
{
"owner": None,
"id": None,
"name": "platformio-core",
"requirements": "!=2",
"url": "https://github.com/platformio/platformio-core/archive/develop.zip?param=value",
},
)
def test_spec_as_dependency():
assert PackageSpec("owner/pkgname").as_dependency() == "owner/pkgname"
assert PackageSpec(owner="owner", name="pkgname").as_dependency() == "owner/pkgname"
assert PackageSpec("bob/foo @ ^1.2.3").as_dependency() == "bob/foo@^1.2.3"
assert (
PackageSpec(
"https://github.com/o/r/a/develop.zip?param=value @ !=2"
).as_dependency()
== "https://github.com/o/r/a/develop.zip?param=value @ !=2"
)
assert (
PackageSpec(
"wolfSSL=https://os.mbed.com/users/wolfSSL/code/wolfSSL/"
).as_dependency()
== "wolfSSL=https://os.mbed.com/users/wolfSSL/code/wolfSSL/"
)
def test_metadata_as_dict():
metadata = PackageMetaData(PackageType.LIBRARY, "foo", "1.2.3")
# test setter
metadata.version = "0.1.2+12345"
assert metadata.version == semantic_version.Version("0.1.2+12345")
assert not jsondiff.diff(
metadata.as_dict(),
{
"type": PackageType.LIBRARY,
"name": "foo",
"version": "0.1.2+12345",
"spec": None,
},
)
assert not jsondiff.diff(
PackageMetaData(
PackageType.TOOL,
"toolchain",
"2.0.5",
PackageSpec("platformio/toolchain@~2.0.0"),
).as_dict(),
{
"type": PackageType.TOOL,
"name": "toolchain",
"version": "2.0.5",
"spec": {
"owner": "platformio",
"id": None,
"name": "toolchain",
"requirements": "~2.0.0",
"url": None,
},
},
)
def test_metadata_dump(tmpdir_factory):
pkg_dir = tmpdir_factory.mktemp("package")
metadata = PackageMetaData(
PackageType.TOOL,
"toolchain",
"2.0.5",
PackageSpec("platformio/toolchain@~2.0.0"),
)
dst = pkg_dir.join(".piopm")
metadata.dump(str(dst))
assert os.path.isfile(str(dst))
contents = dst.read()
assert all(s in contents for s in ("null", '"~2.0.0"'))
def test_metadata_load(tmpdir_factory):
contents = """
{
"name": "foo",
"spec": {
"name": "foo",
"owner": "username",
"requirements": "!=3.4.5"
},
"type": "platform",
"version": "0.1.3"
}
"""
pkg_dir = tmpdir_factory.mktemp("package")
dst = pkg_dir.join(".piopm")
dst.write(contents)
metadata = PackageMetaData.load(str(dst))
assert metadata.version == semantic_version.Version("0.1.3")
assert metadata == PackageMetaData(
PackageType.PLATFORM,
"foo",
"0.1.3",
spec=PackageSpec(owner="username", name="foo", requirements="!=3.4.5"),
)
piopm_path = pkg_dir.join(".piopm")
metadata = PackageMetaData(
PackageType.LIBRARY, "mylib", version="1.2.3", spec=PackageSpec("mylib")
)
metadata.dump(str(piopm_path))
restored_metadata = PackageMetaData.load(str(piopm_path))
assert metadata == restored_metadata
|
venv/Lib/site-packages/statsmodels/tsa/statespace/cfa_simulation_smoother.py | EkremBayar/bayar | 6,931 | 11139672 | """
"Cholesky Factor Algorithm" (CFA) simulation smoothing for state space models
Author: <NAME>
License: BSD-3
"""
import numpy as np
from . import tools
class CFASimulationSmoother(object):
r"""
"Cholesky Factor Algorithm" (CFA) simulation smoother
Parameters
----------
model : Representation
The state space model.
Notes
-----
This class allows simulation smoothing by the "Cholesky Factor Algorithm"
(CFA) described in [1]_ and [2]_, which essentially takes advantage of the
existence of an efficient sparse Cholesky factor algorithm for banded
matrices that are held in a sparse matrix format.
In particular, this simulation smoother computes the joint posterior mean
and covariance matrix for the unobserved state vector all at once, rather
than using the recursive computations of the Kalman filter and smoother. It
then uses these posterior moments to sample directly from this joint
posterior. For some models, it can be more computationally efficient than
the simulation smoother based on the Kalman filter and smoother.
**Important caveat**:
However, this simulation smoother cannot be used with all state space
models, including several of the most popular. In particular, the CFA
algorithm cannot support degenerate distributions (i.e. positive
semi-definite covariance matrices) for the initial state (which is the
prior for the first state) or the observation or state innovations.
One practical problem with this algorithm is that an autoregressive term
with order higher than one is typically put into state space form by
augmenting the states using identities. As identities, these augmenting
terms will not be subject to random innovations, and so the state
innovation will be degenerate. It is possible to take these higher order
terms directly into account when constructing the posterior covariance
matrix, but this has not yet been implemented.
Similarly, some state space forms of SARIMA and VARMA models make
the observation equation an identity, which is not compatible with the CFA
simulation smoothing approach.
    This simulation smoother has so far found most of its use with dynamic
factor and stochastic volatility models, which satisfy the restrictions
described above.
**Not-yet-implemented**:
There are several features that are not yet available with this simulation
smoother:
- It does not yet allow diffuse initialization of the state vector.
- It produces simulated states only for exactly the observations in the
model (i.e. it cannot produce simulations for a subset of the model
observations or for observations outside the model).
References
----------
.. [1] McCausland, <NAME>., <NAME>, and <NAME>.
"Simulation smoothing for state-space models: A computational
efficiency analysis."
Computational Statistics & Data Analysis 55, no. 1 (2011): 199-212.
.. [2] Chan, <NAME>, and <NAME>.
"Efficient simulation and integrated likelihood estimation in
state space models."
International Journal of Mathematical Modelling and Numerical
Optimisation 1, no. 1-2 (2009): 101-120.
"""
def __init__(self, model, cfa_simulation_smoother_classes=None):
self.model = model
# Get the simulation smoother classes
self.prefix_simulation_smoother_map = (
cfa_simulation_smoother_classes
if cfa_simulation_smoother_classes is not None
else tools.prefix_cfa_simulation_smoother_map.copy())
self._simulation_smoothers = {}
self._posterior_mean = None
self._posterior_cov_inv_chol = None
self._posterior_cov = None
self._simulated_state = None
@property
def _simulation_smoother(self):
prefix = self.model.prefix
if prefix in self._simulation_smoothers:
return self._simulation_smoothers[prefix]
return None
@property
def posterior_mean(self):
r"""
Posterior mean of the states conditional on the data
Notes
-----
.. math::
\hat \alpha_t = E[\alpha_t \mid Y^n ]
This posterior mean is identical to the `smoothed_state` computed by
the Kalman smoother.
"""
if self._posterior_mean is None:
self._posterior_mean = np.array(
self._simulation_smoother.posterior_mean, copy=True)
return self._posterior_mean
@property
def posterior_cov_inv_chol_sparse(self):
r"""
Sparse Cholesky factor of inverse posterior covariance matrix
Notes
-----
This attribute holds in sparse diagonal banded storage the Cholesky
factor of the inverse of the posterior covariance matrix. If we denote
        :math:`P = Var[\alpha \mid Y^n ]`, then this attribute holds the
lower Cholesky factor :math:`L`, defined from :math:`L L' = P^{-1}`.
This attribute uses the sparse diagonal banded storage described in the
documentation of, for example, the SciPy function
`scipy.linalg.solveh_banded`.
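
        For example, if ``inv_chol`` denotes this attribute, the full posterior
        covariance matrix can be recovered (at a potentially large memory cost)
        with::

            from scipy.linalg import cho_solve_banded
            posterior_cov = cho_solve_banded(
                (inv_chol, True), np.eye(inv_chol.shape[1]))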
"""
if self._posterior_cov_inv_chol is None:
self._posterior_cov_inv_chol = np.array(
self._simulation_smoother.posterior_cov_inv_chol, copy=True)
return self._posterior_cov_inv_chol
@property
def posterior_cov(self):
r"""
Posterior covariance of the states conditional on the data
Notes
-----
**Warning**: the matrix computed when accessing this property can be
extremely large: it is shaped `(nobs * k_states, nobs * k_states)`. In
most cases, it is better to use the `posterior_cov_inv_chol_sparse`
property if possible, which holds in sparse diagonal banded storage
the Cholesky factor of the inverse of the posterior covariance matrix.
.. math::
Var[\alpha \mid Y^n ]
This posterior covariance matrix is *not* identical to the
`smoothed_state_cov` attribute produced by the Kalman smoother, because
it additionally contains all cross-covariance terms. Instead,
`smoothed_state_cov` contains the `(k_states, k_states)` block
diagonal entries of this posterior covariance matrix.
"""
if self._posterior_cov is None:
from scipy.linalg import cho_solve_banded
inv_chol = self.posterior_cov_inv_chol_sparse
self._posterior_cov = cho_solve_banded(
(inv_chol, True), np.eye(inv_chol.shape[1]))
return self._posterior_cov
def simulate(self, variates=None, update_posterior=True):
r"""
Perform simulation smoothing (via Cholesky factor algorithm)
Does not return anything, but populates the object's `simulated_state`
attribute, and also makes available the attributes `posterior_mean`,
`posterior_cov`, and `posterior_cov_inv_chol_sparse`.
Parameters
----------
variates : array_like, optional
Random variates, distributed standard Normal. Usually only
specified if results are to be replicated (e.g. to enforce a seed)
or for testing. If not specified, random variates are drawn. Must
be shaped (nobs, k_states).
Notes
-----
The first step in simulating from the joint posterior of the state
vector conditional on the data is to compute the two relevant moments
of the joint posterior distribution:
.. math::
\alpha \mid Y_n \sim N(\hat \alpha, Var(\alpha \mid Y_n))
Let :math:`L L' = Var(\alpha \mid Y_n)^{-1}`. Then simulation proceeds
according to the following steps:
1. Draw :math:`u \sim N(0, I)`
2. Compute :math:`x = \hat \alpha + (L')^{-1} u`
And then :math:`x` is a draw from the joint posterior of the states.
The output of the function is as follows:
- The simulated draw :math:`x` is held in the `simulated_state`
attribute.
- The posterior mean :math:`\hat \alpha` is held in the
`posterior_mean` attribute.
- The (lower triangular) Cholesky factor of the inverse posterior
covariance matrix, :math:`L`, is held in sparse diagonal banded
storage in the `posterior_cov_inv_chol` attribute.
- The posterior covariance matrix :math:`Var(\alpha \mid Y_n)` can be
computed on demand by accessing the `posterior_cov` property. Note
that this matrix can be extremely large, so care must be taken when
accessing this property. In most cases, it will be preferred to make
use of the `posterior_cov_inv_chol` attribute rather than the
`posterior_cov` attribute.
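
        Examples
        --------
        A minimal sketch (``mod`` is assumed to be a compatible statsmodels
        state space model, e.g. a dynamic factor model; the names below are
        illustrative)::

            sim = CFASimulationSmoother(mod.ssm)
            sim.simulate()
            draw = sim.simulated_state    # joint draw from p(alpha | Y_n)
            mean = sim.posterior_mean     # E[alpha | Y_n]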
"""
# (Re) initialize the _statespace representation
prefix, dtype, create = self.model._initialize_representation()
# Validate variates and get in required datatype
if variates is not None:
tools.validate_matrix_shape('variates', variates.shape,
self.model.k_states,
self.model.nobs, 1)
variates = np.ravel(variates, order='F').astype(dtype)
# (Re) initialize the state
self.model._initialize_state(prefix=prefix)
# Construct the Cython simulation smoother instance, if necessary
if create or prefix not in self._simulation_smoothers:
cls = self.prefix_simulation_smoother_map[prefix]
self._simulation_smoothers[prefix] = cls(
self.model._statespaces[prefix])
sim = self._simulation_smoothers[prefix]
# Update posterior moments, if requested
if update_posterior:
sim.update_sparse_posterior_moments()
self._posterior_mean = None
self._posterior_cov_inv_chol = None
self._posterior_cov = None
# Perform simulation smoothing
self.simulated_state = sim.simulate(variates=variates)
|
social/tests/backends/test_evernote.py | raccoongang/python-social-auth | 1,987 | 11139675 |
from requests import HTTPError
from social.p3 import urlencode
from social.exceptions import AuthCanceled
from social.tests.backends.oauth import OAuth1Test
class EvernoteOAuth1Test(OAuth1Test):
backend_path = 'social.backends.evernote.EvernoteOAuth'
expected_username = '101010'
access_token_body = urlencode({
'edam_webApiUrlPrefix': 'https://sandbox.evernote.com/shard/s1/',
'edam_shard': 's1',
'oauth_token': 'foobar',
'edam_expires': '1395118279645',
'edam_userId': '101010',
'edam_noteStoreUrl': 'https://sandbox.evernote.com/shard/s1/notestore'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class EvernoteOAuth1CanceledTest(EvernoteOAuth1Test):
access_token_status = 401
def test_login(self):
with self.assertRaises(AuthCanceled) as cm:
self.do_login()
self.assertTrue(cm.exception.response is not None)
def test_partial_pipeline(self):
with self.assertRaises(AuthCanceled) as cm:
self.do_partial_pipeline()
self.assertTrue(cm.exception.response is not None)
class EvernoteOAuth1ErrorTest(EvernoteOAuth1Test):
access_token_status = 500
def test_login(self):
with self.assertRaises(HTTPError):
self.do_login()
def test_partial_pipeline(self):
with self.assertRaises(HTTPError):
self.do_partial_pipeline()
|
addons/box/views.py | gaybro8777/osf.io | 628 | 11139702 |
"""Views for the node settings page."""
# -*- coding: utf-8 -*-
from flask import request
from addons.base import generic_views
from addons.box.serializer import BoxSerializer
from website.project.decorators import must_have_addon, must_be_addon_authorizer
SHORT_NAME = 'box'
FULL_NAME = 'Box'
box_account_list = generic_views.account_list(
SHORT_NAME,
BoxSerializer
)
box_import_auth = generic_views.import_auth(
SHORT_NAME,
BoxSerializer
)
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def box_folder_list(node_addon, **kwargs):
""" Returns all the subsequent folders under the folder id passed.
"""
folder_id = request.args.get('folder_id')
return node_addon.get_folders(folder_id=folder_id)
box_get_config = generic_views.get_config(
SHORT_NAME,
BoxSerializer
)
def _set_folder(node_addon, folder, auth):
uid = folder['id']
node_addon.set_folder(uid, auth=auth)
node_addon.save()
box_set_config = generic_views.set_config(
SHORT_NAME,
FULL_NAME,
BoxSerializer,
_set_folder
)
box_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
|
tensorflow_federated/python/learning/metrics/finalizer.py | tensorflow/federated | 1,918 | 11139711 |
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for creating metric finalizers."""
from typing import Any, Callable, List, Union
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
# A finalizer of a Keras metric is a `tf.function` decorated callable that takes
# in the unfinalized values of this Keras metric (i.e., the tensor values of the
# variables in `keras_metric.variables`), and returns the value of
# `keras_metric.result()`.
KerasMetricFinalizer = Callable[[List[tf.Tensor]], Any]
# TODO(b/197746608): removes the code path that takes in a constructed Keras
# metric, because reconstructing metric via `from_config` can cause problems.
def create_keras_metric_finalizer(
metric: Union[tf.keras.metrics.Metric, Callable[[],
tf.keras.metrics.Metric]]
) -> KerasMetricFinalizer:
"""Creates a finalizer function for the given Keras metric.
Args:
metric: An instance of `tf.keras.metrics.Metric` or a no-arg callable that
constructs a `tf.keras.metrics.Metric`.
Returns:
A `tf.function` decorated callable that takes in the unfinalized metric
values (i.e., tensor values of the variables in `keras_metric.variables`),
and returns the value of `keras_metric.result()`.
Raises:
TypeError: If `metric` is not a `tf.keras.metrics.Metric` and not a no-arg
callable that returns a `tf.keras.metrics.Metric`.
"""
@tf.function
def finalizer(unfinalized_metric_values: List[tf.Tensor]):
    # Construct a new Keras metric here, which is necessary because this
    # `tf.function` may be invoked in a different context than the `model_fn`, and
    # we need the `tf.Variable`s to be created in the current scope in order to
    # use `keras_metric.result()`.
with tf.init_scope():
if isinstance(metric, tf.keras.metrics.Metric):
keras_metric = type(metric).from_config(metric.get_config())
elif callable(metric):
keras_metric = metric()
if not isinstance(keras_metric, tf.keras.metrics.Metric):
raise TypeError(
'Expected input `metric` to be either a `tf.keras.metrics.Metric`'
' or a no-arg callable that creates a `tf.keras.metrics.Metric`, '
'found a callable that returns a '
f'{py_typecheck.type_string(type(keras_metric))}.')
else:
raise TypeError(
'Expected input `metric` to be either a `tf.keras.metrics.Metric` '
'or a no-arg callable that constructs a `tf.keras.metrics.Metric`, '
f'found a non-callable {py_typecheck.type_string(type(metric))}.')
py_typecheck.check_type(unfinalized_metric_values, list)
if len(keras_metric.variables) != len(unfinalized_metric_values):
raise ValueError(
'The input to the finalizer should be a list of `tf.Tensor`s matching'
f' the variables of the Keras metric {keras_metric.name}. Expected '
f'a list of `tf.Tensor`s of length {len(keras_metric.variables)}, '
f'found a list of length {len(unfinalized_metric_values)}.')
for v, a in zip(keras_metric.variables, unfinalized_metric_values):
py_typecheck.check_type(a, tf.Tensor)
if v.shape != a.shape or v.dtype != a.dtype:
raise ValueError(
'The input to the finalizer should be a list of `tf.Tensor`s '
f'matching the variables of the Keras metric {keras_metric.name}. '
f'Expected a `tf.Tensor` of shape {v.shape} and dtype {v.dtype!r}, '
f'found a `tf.Tensor` of shape {a.shape} and dtype {a.dtype!r}.')
v.assign(a)
return keras_metric.result()
return finalizer
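# Illustrative usage sketch (added for clarity, not part of the library): the
# metric choice and the unfinalized values below are assumptions made for the
# example. A finalizer built from a no-arg constructor is applied to a list of
# tensors matching the metric's variables (for SparseCategoricalAccuracy these
# are `total` and `count`), yielding `result() == total / count`.
def _example_finalizer_usage():
  finalizer = create_keras_metric_finalizer(
      lambda: tf.keras.metrics.SparseCategoricalAccuracy())
  unfinalized_values = [tf.constant(3.0), tf.constant(4.0)]
  return finalizer(unfinalized_values)  # Expected to evaluate to 0.75.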
|
thanks/thanks.py | phildini/thanks | 168 | 11139749 | <filename>thanks/thanks.py
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import namedtuple
from humanfriendly.tables import format_pretty_table
import json
import logging
import os
import requirements
import requests
import termcolor
import toml
from termcolor import colored
from . import package_tools
logger = logging.getLogger("thanks")
JSON_FILE = ("{}/thanks.json".format(os.path.dirname(os.path.realpath(__file__))))
ProjectData = namedtuple('ProjectData', ['name', 'funding_link', 'authors'])
def _get_datum(path, value, default=None):
sentinel = {}
keys = path.split(".")
for key in keys:
value = value.get(key, sentinel)
if value is sentinel:
return default
return value
class Thanks():
def __init__(self, debug=False):
self.debug = debug
self.give_thanks_to = {}
def package(self, package_name):
logger.debug("Checking {}".format(package_name))
package_data = (self._get_local_data(package_name)
or self._get_remote_data(package_name))
if package_data:
self.give_thanks_to[package_name] = package_data
def requirements_list(self, requirements_list):
print('Scanning your {} file...'.format(colored('requirements', 'red')))
reqs = [
next(requirements.parse(r))
for r in requirements_list.splitlines()
if r != ""
]
for req in reqs:
self.package(req.name)
def pipfile(self, pipfile):
project_data = toml.loads(pipfile)
reqs = []
reqs += list(project_data.get("packages", {}).keys())
reqs += list(project_data.get("dev-packages", {}).keys())
for req in reqs:
self.package(req)
def _get_local_data(self, project_name):
try:
metadata = package_tools.get_local_metadata(project_name)
funding_link = metadata.get('funding_url', '')
authors = metadata.get('author', '')
if not any([funding_link, authors]):
return None
data = ProjectData(
name=project_name,
funding_link=funding_link,
authors=authors,
)
except KeyError:
data = None
except package_tools.MetaDataNotFound:
data = None
return data
def _get_remote_data(self, project_name):
url_format = 'https://pypi.org/pypi/{}/json'.format
try:
resp = requests.get(url_format(project_name))
project_data = resp.json()
data = ProjectData(
name=project_name,
funding_link=_get_datum(
"info.project_urls.Funding", project_data, default=""),
authors=_get_datum(
"info.author", project_data, default=""),
)
except requests.exceptions.ConnectionError:
data = None
except json.decoder.JSONDecodeError:
data = None
return data
def _generate_output(self, colored_output=True):
def _uncolored(text, *args, **kwargs):
return text
colored = termcolor.colored if colored_output else _uncolored
lines = []
lines.append(
colored('You depend on {} who would {}'.format(
colored('{} authors'.format(len(self.give_thanks_to)), 'cyan'),
colored('enjoy donations!', 'green'),
),
attrs=['bold']
)
)
colorized_data = [
ProjectData(
name=colored(pd.name, 'cyan'),
funding_link=colored(pd.funding_link, 'green'),
authors=colored(pd.authors, 'yellow'),
)
for pd in self.give_thanks_to.values()
]
lines.append(format_pretty_table(
data=colorized_data,
column_names=['Project', 'Where to thank', 'Authors'],
horizontal_bar=' ',
vertical_bar=' ',
))
lines.append(
''.join([
"See projects without ",
colored("FUNDING INFORMATION", "red"),
"? Why not submit a pull request to ",
"the project asking the author to add a ",
colored("'FUNDING' PROJECT_URL ", "yellow"),
"to the project's setup.py. ",
"https://packaging.python.org/guides/",
"distributing-packages-using-setuptools/#project-urls\n"
]),
)
return '\n'.join(lines)
def __str__(self, colored_output=True):
return self._generate_output(colored_output)
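# Illustrative usage sketch (added for clarity, not part of the module): the
# package names and requirements text below are assumptions made for the
# example, and the remote lookup needs network access to pypi.org.
if __name__ == '__main__':
    thanks = Thanks(debug=True)
    thanks.package('requests')
    thanks.requirements_list('flask==2.0.1\nrequests>=2.25\n')
    print(thanks)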
|
tests/dynamic_analysis/test_run_analysis_arm.py | jrespeto/LiSa | 244 | 11139777 | """
Unit tests for full run_analysis with QEMU emulation.
"""
import os
import pytest
from lisa.analysis.dynamic_analysis import DynamicAnalyzer
from lisa.core.base import AnalyzedFile
location = os.path.dirname(__file__)
@pytest.fixture(scope='module')
def analysis():
return {'output': None}
def test_run_analysis(analysis):
sample_path = f'{location}/../binaries/testbin-puts-arm'
sample = AnalyzedFile(sample_path, '/tmp')
analyzer = DynamicAnalyzer(sample)
analyzer.run_analysis()
analysis['output'] = analyzer.output
def test_syscalls_correct(analysis):
syscall_0 = analysis['output']['syscalls'][0]
assert syscall_0['name'] == 'brk'
syscall_write = analysis['output']['syscalls'][-2]
assert syscall_write['name'] == 'write'
write_args = syscall_write['arguments']
assert write_args.startswith('1, "LiSa test.')
def test_pcap_correct(analysis):
pcap_size = os.path.getsize('/tmp/capture.pcap')
assert pcap_size != 0
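# Note (added for clarity, not part of the test module): these tests drive a full
# QEMU-backed analysis, so they are typically run via pytest from the repository
# root; the exact invocation below is an assumption.
#
#     pytest tests/dynamic_analysis/test_run_analysis_arm.py -v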
|