id | content
---|---
4137
|
import torch
ckp_path = './checkpoints/fashion_PATN/latest_net_netG.pth'
save_path = './checkpoints/fashion_PATN_v1.0/latest_net_netG.pth'
states_dict = torch.load(ckp_path)
states_dict_new = states_dict.copy()
for key in states_dict.keys():
if "running_var" in key or "running_mean" in key:
del states_dict_new[key]
torch.save(states_dict_new, save_path)
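
# A minimal follow-up check (a sketch, reusing the variables above): confirm that only
# the BatchNorm running-statistics entries were dropped and report what remains.
removed = set(states_dict.keys()) - set(states_dict_new.keys())
assert all("running_var" in k or "running_mean" in k for k in removed)
print(f"removed {len(removed)} running-stat entries, kept {len(states_dict_new)} tensors")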
|
4145
|
import ast
from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name
class ObjectGenerator(object):
@staticmethod
def _get_property_name(node_assign):
name = node_assign.targets[0]
return name.id
@staticmethod
def _nesting_class(node_assign):
for node in ast.walk(node_assign):
if isinstance(node, ast.Call):
if node.func.attr == "Nested":
return class_name(node.args[0].id)
@staticmethod
def _non_primitive_nested_list(node_assign):
if node_assign.value.func.attr == "List":
return (
len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == "Nested"
)
else:
return False
@staticmethod
def _init_non_primitive_nested_class(node_assign, object_, prop):
"""
        If the nested list is non-primitive, initialise sub-classes in a list comp.
        If the nested list is primitive, we can simply get it;
        Marshmallow will do the type marshalling.
"""
return ast.ListComp(
elt=ast.Call(
func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)),
args=[ast.Name(id="el")],
keywords=[],
),
generators=[
ast.comprehension(
target=ast.Name(id="el"),
iter=ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])],
keywords=[],
),
ifs=[],
is_async=0,
)
],
)
@staticmethod
def _get_key_from_object(object_, prop):
return ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop)],
keywords=[],
)
@staticmethod
def _hint_required_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword):
if "required" in node.arg:
value = ast.Subscript(
value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop))
)
return value
@staticmethod
def _get_default_for_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "required":
return value
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "default":
default_value = [
keyword.value
for keyword in node_assign.value.keywords
if keyword.arg == "default"
][0]
value.args.append(default_value)
return value
else:
return value
@staticmethod
def assign_property(node_assign, object_):
"""
Required property -> self.prop = parent_dict["prop"]
Optional property -> self.prop = parent_dict.get("prop")
        Primitive nested list -> self.prop = parent_dict.get("prop")
        Non-primitive nested list -> self.props = [PropertyClass(el) for el in parent_dict.get('props', {})]
"""
prop = ObjectGenerator._get_property_name(node_assign)
if ObjectGenerator._non_primitive_nested_list(node_assign):
value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop)
else:
# Assign the property as self.prop = table.get("prop")
value = ObjectGenerator._get_key_from_object(object_, prop)
# If the property is required, assign as self.prop = table["prop"]
value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop)
value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop)
return ast.AnnAssign(
target=ast.Attribute(value=ast.Name(id="self"), attr=prop),
value=value,
simple=0,
annotation=Annotations(node_assign).type,
)
@staticmethod
def construct_class(schema):
name = class_name(schema.name)
name_lower = name.lower()
# Bundle function arguments and keywords
fn_arguments = ast.arguments(
args=[
ast.arg(arg="self", annotation=None),
ast.arg(arg=name_lower, annotation=ast.Name(id="dict")),
],
vararg=None,
kwarg=None,
kwonlyargs=[],
kw_defaults=[],
defaults=[],
)
fn_body = [
ObjectGenerator.assign_property(node, name_lower)
for node in schema.body
if isinstance(node, ast.Assign)
]
# pass if no Assign nodes
if len(fn_body) == 0:
fn_body = [ast.Pass()]
# Generate class constructor
class_body = [
ast.FunctionDef(
name="__init__", args=fn_arguments, body=fn_body, decorator_list=[], returns=None
),
ObjectGenerator._construct_to_("json")(schema),
ObjectGenerator._construct_to_("dict")(schema),
ObjectGenerator.construct_from_json(schema),
]
return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[])
@staticmethod
def _construct_to_(output):
if output == "json":
method = "dumps"
elif output == "dict":
method = "dump"
else:
raise NotImplementedError("Only deserialisation to json or dict supported")
def _construct_to_helper(schema):
fn_args = ast.arguments(
args=[ast.arg(arg="self", annotation=None)],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[],
)
fn_body = [
ast.Return(
value=ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(
arg="strict", value=ast.NameConstant(value=True)
)
],
),
attr=method,
),
args=[ast.Name(id="self")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name=f"to_{output}", args=fn_args, body=fn_body, decorator_list=[], returns=None
)
return _construct_to_helper
@staticmethod
def construct_from_json(schema):
fn_args = ast.arguments(
args=[
ast.arg(arg="json", annotation=ast.Name(id="str")),
ast.arg(arg="only", annotation=None),
],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[ast.NameConstant(value=None)],
)
fn_body = [
ast.Return(
ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(arg="strict", value=ast.NameConstant(value=True)),
ast.keyword(arg="only", value=ast.Name(id="only")),
],
),
attr="loads",
),
args=[ast.Name(id="json")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name="from_json",
args=fn_args,
body=fn_body,
decorator_list=[ast.Name(id="staticmethod")],
returns=None,
)
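
# Standalone illustration (not part of json_codegen): assign_property above emits nodes
# of the form `self.prop = parent_dict.get("prop")`. One way to see that kind of
# generated code is to build such a node by hand and render it with ast.unparse
# (available since Python 3.9); the `ast` import at the top of this file is reused.
if __name__ == "__main__":
    demo = ast.Assign(
        targets=[ast.Attribute(value=ast.Name(id="self", ctx=ast.Load()), attr="prop", ctx=ast.Store())],
        value=ast.Call(
            func=ast.Attribute(value=ast.Name(id="parent_dict", ctx=ast.Load()), attr="get", ctx=ast.Load()),
            args=[ast.Constant(value="prop")],
            keywords=[],
        ),
    )
    module = ast.fix_missing_locations(ast.Module(body=[demo], type_ignores=[]))
    print(ast.unparse(module))  # -> self.prop = parent_dict.get('prop')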
|
4186
|
import json
from wptserve.utils import isomorphic_decode
def main(request, response):
origin = request.GET.first(b"origin", request.headers.get(b'origin') or b'none')
if b"check" in request.GET:
token = request.GET.first(b"token")
value = request.server.stash.take(token)
if value is not None:
if request.GET.first(b"check", None) == b"keep":
request.server.stash.put(token, value)
body = u"1"
else:
body = u"0"
return [(b"Content-Type", b"text/plain")], body
if origin != b'none':
response.headers.set(b"Access-Control-Allow-Origin", origin)
if b'origin2' in request.GET:
response.headers.append(b"Access-Control-Allow-Origin", request.GET.first(b'origin2'))
#Preflight
if b'headers' in request.GET:
response.headers.set(b"Access-Control-Allow-Headers", request.GET.first(b'headers'))
if b'credentials' in request.GET:
response.headers.set(b"Access-Control-Allow-Credentials", request.GET.first(b'credentials'))
if b'methods' in request.GET:
response.headers.set(b"Access-Control-Allow-Methods", request.GET.first(b'methods'))
code_raw = request.GET.first(b'code', None)
if code_raw:
code = int(code_raw)
else:
code = None
if request.method == u'OPTIONS':
#Override the response code if we're in a preflight and it's asked
if b'preflight' in request.GET:
code = int(request.GET.first(b'preflight'))
#Log that the preflight actually happened if we have an ident
if b'token' in request.GET:
request.server.stash.put(request.GET[b'token'], True)
if b'location' in request.GET:
if code is None:
code = 302
if code >= 300 and code < 400:
response.headers.set(b"Location", request.GET.first(b'location'))
headers = {}
for name, values in request.headers.items():
if len(values) == 1:
headers[isomorphic_decode(name)] = isomorphic_decode(values[0])
else:
#I have no idea, really
headers[name] = values
headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b''))
body = json.dumps(headers)
if code:
return (code, b"StatusText"), [], body
else:
return body
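
# Hypothetical manual check (the URL is only an assumption about where wptserve serves
# this handler; in web-platform-tests it is normally exercised from testharness.js).
# Kept commented out so the handler module stays importable by wptserve:
# import requests
# resp = requests.options(
#     "http://localhost:8000/path/to/this-handler.py",
#     params={"headers": "x-custom", "methods": "PUT", "credentials": "true"},
#     headers={"Origin": "http://example.test"},
# )
# print(resp.status_code, resp.headers.get("Access-Control-Allow-Headers"))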
|
4206
|
import pandas as pd
from pandas import DataFrame
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)
df['H-L'] = df.High - df.Low
# Giving us count (rows), mean (avg), std (standard deviation for the entire
# set), minimum for the set, maximum for the set, and some %s in that range.
print( df.describe())
x = input('enter to cont')
# gives us correlation data. Remember the 3d chart we plotted?
# now you can see if correlation of H-L and Volume also is correlated
# with price swings. Correlations for your correlations
print( df.corr())
x = input('enter to cont')
# covariance... now plenty of people know what correlation is, but what in the
# heck is covariance.
# Let's define the two.
# covariance is the measure of how two variables change together.
# correlation is the measure of how two variables move in relation to each other.
# so covariance is a more direct assessment of the relationship between two variables.
# Maybe a better way to put it is that covariance is the measure of the strength of correlation.
print( df.cov())
x = input('enter to cont')
print( df[['Volume','H-L']].corr())
x = input('enter to cont')
# see how it makes a table?
# so now, we can actually perform a service that some people actually pay for
# I once had a short freelance gig doing this
# so a popular form of analysis, especially within forex, is to compare correlations between
# the currencies. The idea here is that you pair one currency with another.
#
import datetime
import pandas.io.data
C = pd.io.data.get_data_yahoo('C',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
AAPL = pd.io.data.get_data_yahoo('AAPL',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
MSFT = pd.io.data.get_data_yahoo('MSFT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TSLA = pd.io.data.get_data_yahoo('TSLA',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
print( C.head())
x = input('enter to cont')
del C['Open']
# , 'high', 'low', 'close', 'volume'
del C['High']
del C['Low']
del C['Close']
del C['Volume']
corComp = C
corComp.rename(columns={'Adj Close': 'C'}, inplace=True)
corComp['AAPL'] = AAPL['Adj Close']
corComp['MSFT'] = MSFT['Adj Close']
corComp['TSLA'] = TSLA['Adj Close']
print( corComp.head())
x = input('enter to cont')
print( corComp.corr())
x = input('enter to cont')
C = pd.io.data.get_data_yahoo('C',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
AAPL = pd.io.data.get_data_yahoo('AAPL',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
MSFT = pd.io.data.get_data_yahoo('MSFT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TSLA = pd.io.data.get_data_yahoo('TSLA',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
BAC = pd.io.data.get_data_yahoo('BAC',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
BBRY = pd.io.data.get_data_yahoo('BBRY',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
CMG = pd.io.data.get_data_yahoo('CMG',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
EBAY = pd.io.data.get_data_yahoo('EBAY',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
JPM = pd.io.data.get_data_yahoo('JPM',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
SBUX = pd.io.data.get_data_yahoo('SBUX',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TGT = pd.io.data.get_data_yahoo('TGT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
WFC = pd.io.data.get_data_yahoo('WFC',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
x = input('enter to cont')
print( C.head())
del C['Open']
# , 'high', 'low', 'close', 'volume'
del C['High']
del C['Low']
del C['Close']
del C['Volume']
corComp = C
corComp.rename(columns={'Adj Close': 'C'}, inplace=True)
corComp['BAC'] = BAC['Adj Close']
corComp['MSFT'] = MSFT['Adj Close']
corComp['TSLA'] = TSLA['Adj Close']
corComp['AAPL'] = AAPL['Adj Close']
corComp['BBRY'] = BBRY['Adj Close']
corComp['CMG'] = CMG['Adj Close']
corComp['EBAY'] = EBAY['Adj Close']
corComp['JPM'] = JPM['Adj Close']
corComp['SBUX'] = SBUX['Adj Close']
corComp['TGT'] = TGT['Adj Close']
corComp['WFC'] = WFC['Adj Close']
print( corComp.head())
x = input('enter to cont')
print( corComp.corr())
x = input('enter to cont')
fancy = corComp.corr()
fancy.to_csv('bigmoney.csv')
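
# Self-contained sketch with synthetic data: the pandas.io.data Yahoo reader used above
# has since been removed from pandas, so the same .corr()/.cov() workflow is repeated
# here on a small random-walk DataFrame instead of downloaded prices.
import numpy as np
rng = np.random.default_rng(0)
synthetic = pd.DataFrame(rng.normal(0, 1, size=(250, 3)).cumsum(axis=0) + 100,
                         columns=['C', 'AAPL', 'MSFT'])
print(synthetic.corr())  # pairwise correlation of the three synthetic price series
print(synthetic.cov())   # covariance matrix of the same series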
|
4227
|
from django.apps import apps
from django.test import override_settings
from wagtail_live.signals import live_page_update
def test_live_page_update_signal_receivers():
assert len(live_page_update.receivers) == 0
@override_settings(
WAGTAIL_LIVE_PUBLISHER="tests.testapp.publishers.DummyWebsocketPublisher"
)
def test_live_page_update_signal_receivers_websocket():
app_config = apps.get_app_config("wagtail_live")
app_config.ready()
try:
# Receiver should be connected, no IndexError
receiver = live_page_update.receivers[0]
finally:
live_page_update.disconnect(receiver)
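
# A further sketch (receiver name is hypothetical): any callable accepting `sender` plus
# keyword arguments can be attached to the same Django signal directly.
def test_live_page_update_signal_manual_receiver():
    def my_receiver(sender, **kwargs):
        pass
    live_page_update.connect(my_receiver)
    try:
        assert len(live_page_update.receivers) == 1
    finally:
        live_page_update.disconnect(my_receiver)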
|
4236
|
import numpy as np
import pytest
from pytest import approx
from pymt.component.grid import GridMixIn
class Port:
def __init__(self, name, uses=None, provides=None):
self._name = name
self._uses = uses or []
self._provides = provides or []
def get_component_name(self):
return self._name
def get_input_item_count(self):
return len(self._uses)
def get_input_item_list(self):
return self._uses
def get_output_item_count(self):
return len(self._provides)
def get_output_item_list(self):
return self._provides
def test_exchange_items():
class Component(GridMixIn):
def __init__(self):
self._port = Port("test", uses=["invar"], provides=["outvar"])
super().__init__()
c = Component()
assert c.input_items == ["invar"]
assert c.output_items == ["outvar"]
def test_no_exchange_items():
class Component(GridMixIn):
def __init__(self):
self._port = Port("test")
super().__init__()
c = Component()
assert c.input_items == []
assert c.output_items == []
def test_raster_1d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (3,)
def get_grid_spacing(self, grid_id):
return (2.0,)
def get_grid_origin(self, grid_id):
return (3.0,)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_x("invar") == approx(np.array([3.0, 5.0, 7.0]))
def test_raster_2d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_spacing(self, grid_id):
return (2.0, 1.0)
def get_grid_origin(self, grid_id):
return (0.0, 0.0)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test-2d", uses=["invar"], provides=["outvar"])
super().__init__()
c = Component()
assert c.name == "test-2d"
assert c.get_grid_type(0) == "RASTER"
assert c.get_x(0) == approx(np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]))
assert c.get_y(0) == approx(np.array([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]))
assert np.all(c.get_connectivity(0) == np.array([0, 1, 4, 3, 1, 2, 5, 4]))
assert np.all(c.get_offset(0) == np.array([4, 8]))
def test_raster_3d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (2, 2, 3)
def get_grid_spacing(self, grid_id):
return (1.0, 2.0, 1.0)
def get_grid_origin(self, grid_id):
return (0.0, 0.0, 0.0)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test-3d", uses=["invar"])
super().__init__()
c = Component()
assert c.get_x(0) == approx(
np.array(
[[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]]
)
)
assert c.get_y(0) == approx(
np.array(
[[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]]
)
)
assert c.get_z(0) == approx(
np.array(
[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]
)
)
def test_rectilinear():
class RectilinearPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return (0.0, 3.0, 4)
def get_grid_y(self, grid_id):
return (2.0, 7.0)
class Component(GridMixIn):
def __init__(self):
self._port = RectilinearPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "RECTILINEAR"
assert c.get_x(0) == approx(np.array([[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]]))
assert c.get_y(0) == approx(np.array([[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]]))
def test_structured():
class StructuredPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
return np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])
class Component(GridMixIn):
def __init__(self):
self._port = StructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "STRUCTURED"
assert c.get_x(0) == approx(np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0]))
assert c.get_y(0) == approx(np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0]))
def test_unstructured():
class UnstructuredPort(Port):
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
return np.array([0.0, 0.0, 1.0, 1.0, 0.0])
def get_grid_connectivity(self, grid_id):
return np.array([0, 1, 3, 2, 4, 3, 1])
def get_grid_offset(self, grid_id):
return np.array([4, 7])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
assert c.get_x(0) == approx(np.array([0.0, 1.0, 0.0, 1.0, 2.0]))
assert c.get_y(0) == approx(np.array([0.0, 0.0, 1.0, 1.0, 0.0]))
def test_get_grid_shape_is_none():
class UnstructuredPort(Port):
def get_grid_shape(self, grid_id):
return None
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
def test_get_grid_shape_raises():
class UnstructuredPort(Port):
def get_grid_shape(self, grid_id):
raise NotImplementedError("get_grid_shape")
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
def test_structured_1d():
class RectilinearPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
raise NotImplementedError("get_grid_y")
def get_grid_z(self, grid_id):
raise NotImplementedError("get_grid_z")
class Component(GridMixIn):
def __init__(self):
self._port = RectilinearPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "RECTILINEAR"
with pytest.raises(IndexError):
c.get_z(0)
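
# Standalone arithmetic sketch (not a pymt API): the raster expectations above follow
# from x = origin + spacing * arange(shape); e.g. test_raster_1d uses origin 3.0 and
# spacing 2.0 over 3 nodes, which gives [3., 5., 7.].
def test_raster_1d_expected_coordinates_sketch():
    origin, spacing, shape = 3.0, 2.0, 3
    assert origin + spacing * np.arange(shape) == approx(np.array([3.0, 5.0, 7.0]))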
|
4265
|
import argparse
import warnings
warnings.simplefilter("ignore", UserWarning)
import files
from tensorboardX import SummaryWriter
import os
import numpy as np
import time
import torch
import torch.optim
import torch.nn as nn
import torch.utils.data
import torchvision
import torchvision.transforms as tfs
from data import DataSet,return_model_loader
from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage
def RotationDataLoader(image_dir, is_validation=False,
batch_size=256, crop_size=224, num_workers=4,shuffle=True):
normalize = tfs.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transforms = tfs.Compose([
tfs.RandomResizedCrop(crop_size),
tfs.RandomGrayscale(p=0.2),
tfs.ColorJitter(0.4, 0.4, 0.4, 0.4),
tfs.RandomHorizontalFlip(),
tfs.Lambda(lambda img: torch.stack([normalize(tfs.ToTensor()(
tfs.functional.rotate(img, angle))) for angle in [0, 90, 180, 270]]
))
])
if is_validation:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/val', transforms))
else:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/train', transforms))
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
drop_last=False
)
return loader
class Optimizer:
def __init__(self):
self.num_epochs = 30
self.lr = 0.05
self.lr_schedule = lambda epoch: (self.lr * (0.1 ** (epoch//args.lrdrop)))*(epoch<80) + (epoch>=80)*self.lr*(0.1**3)
self.momentum = 0.9
self.weight_decay = 10**(-5)
self.resume = True
self.checkpoint_dir = None
self.writer = None
self.K = args.ncl
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.val_loader = RotationDataLoader(args.imagenet_path, is_validation=True,
batch_size=args.batch_size, num_workers=args.workers,shuffle=True)
def optimize_epoch(self, model, optimizer, loader, epoch, validation=False):
print(f"Starting epoch {epoch}, validation: {validation} " + "="*30)
loss_value = AverageMeter()
rotacc_value = AverageMeter()
# house keeping
if not validation:
model.train()
lr = self.lr_schedule(epoch)
for pg in optimizer.param_groups:
pg['lr'] = lr
else:
model.eval()
XE = torch.nn.CrossEntropyLoss().to(self.dev)
l_dl = 0 # len(loader)
now = time.time()
batch_time = MovingAverage(intertia=0.9)
for iter, (data, label, selected) in enumerate(loader):
now = time.time()
if not validation:
niter = epoch * len(loader.dataset) + iter*args.batch_size
data = data.to(self.dev)
mass = data.size(0)
where = np.arange(mass,dtype=int) * 4
data = data.view(mass * 4, 3, data.size(3), data.size(4))
rotlabel = torch.tensor(range(4)).view(-1, 1).repeat(mass, 1).view(-1).to(self.dev)
#################### train CNN ###########################################
if not validation:
final = model(data)
if args.onlyrot:
loss = torch.Tensor([0]).to(self.dev)
else:
if args.hc == 1:
loss = XE(final[0][where], self.L[selected])
else:
loss = torch.mean(torch.stack([XE(final[k][where], self.L[k, selected]) for k in range(args.hc)]))
rotloss = XE(final[-1], rotlabel)
pred = torch.argmax(final[-1], 1)
total_loss = loss + rotloss
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
correct = (pred == rotlabel).to(torch.float)
rotacc = correct.sum() / float(mass)
else:
final = model(data)
pred = torch.argmax(final[-1], 1)
correct = (pred == rotlabel.cuda()).to(torch.float)
rotacc = correct.sum() / float(mass)
total_loss = torch.Tensor([0])
loss = torch.Tensor([0])
rotloss = torch.Tensor([0])
rotacc_value.update(rotacc.item(), mass)
loss_value.update(total_loss.item(), mass)
batch_time.update(time.time() - now)
now = time.time()
print(
f"Loss: {loss_value.avg:03.3f}, RotAcc: {rotacc_value.avg:03.3f} | {epoch: 3}/{iter:05}/{l_dl:05} Freq: {mass / batch_time.avg:04.1f}Hz:",
end='\r', flush=True)
# every few iter logging
if (iter % args.logiter == 0):
if not validation:
print(niter, " Loss: {0:.3f}".format(loss.item()), flush=True)
with torch.no_grad():
if not args.onlyrot:
pred = torch.argmax(final[0][where], dim=1)
pseudoloss = XE(final[0][where], pred)
if not args.onlyrot:
self.writer.add_scalar('Pseudoloss', pseudoloss.item(), niter)
self.writer.add_scalar('lr', self.lr_schedule(epoch), niter)
self.writer.add_scalar('Loss', loss.item(), niter)
self.writer.add_scalar('RotLoss', rotloss.item(), niter)
self.writer.add_scalar('RotAcc', rotacc.item(), niter)
if iter > 0:
self.writer.add_scalar('Freq(Hz)', mass/(time.time() - now), niter)
# end of epoch logging
if self.writer and (epoch % self.log_interval == 0):
write_conv(self.writer, model, epoch)
if validation:
print('val Rot-Acc: ', rotacc_value.avg)
self.writer.add_scalar('val Rot-Acc', rotacc_value.avg, epoch)
files.save_checkpoint_all(self.checkpoint_dir, model, args.arch,
optimizer, self.L, epoch,lowest=False)
return {'loss': loss_value.avg}
def optimize(self, model, train_loader):
"""Perform full optimization."""
first_epoch = 0
model = model.to(self.dev)
self.optimize_times = [0]
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
weight_decay=self.weight_decay,
momentum=self.momentum,
lr=self.lr)
if self.checkpoint_dir is not None and self.resume:
self.L, first_epoch = files.load_checkpoint_all(self.checkpoint_dir, model=None, opt=None)
print('loaded from: ', self.checkpoint_dir,flush=True)
print('first five entries of L: ', self.L[:5], flush=True)
print('found first epoch to be', first_epoch, flush=True)
first_epoch = 0
self.optimize_times = [0]
self.L = self.L.cuda()
print("model.headcount ", model.headcount, flush=True)
#####################################################################################
        # Perform optimization ###############################################################
lowest_loss = 1e9
epoch = first_epoch
while epoch < (self.num_epochs+1):
if not args.val_only:
m = self.optimize_epoch(model, optimizer, train_loader, epoch, validation=False)
if m['loss'] < lowest_loss:
lowest_loss = m['loss']
files.save_checkpoint_all(self.checkpoint_dir, model, args.arch,
optimizer, self.L, epoch, lowest=True)
else:
print('='*30 +' doing only validation ' + "="*30)
epoch = self.num_epochs
m = self.optimize_epoch(model, optimizer, self.val_loader, epoch, validation=True)
epoch += 1
print(f"Model optimization completed. Saving final model to {os.path.join(self.checkpoint_dir, 'model_final.pth.tar')}")
torch.save(model, os.path.join(self.checkpoint_dir, 'model_final.pth.tar'))
return model
def get_parser():
parser = argparse.ArgumentParser(description='Retrain with given labels combined with RotNet loss')
# optimizer
parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of epochs')
parser.add_argument('--batch-size', default=64, type=int, metavar='BS', help='batch size')
parser.add_argument('--lr', default=0.05, type=float, metavar='FLOAT', help='initial learning rate')
parser.add_argument('--lrdrop', default=30, type=int, metavar='INT', help='multiply LR by 0.1 every')
# architecture
parser.add_argument('--arch', default='alexnet', type=str, help='alexnet or resnet')
parser.add_argument('--archspec', default='big', type=str, help='big or small for alexnet ')
parser.add_argument('--ncl', default=1000, type=int, metavar='INT', help='number of clusters')
parser.add_argument('--hc', default=1, type=int, metavar='INT', help='number of heads')
parser.add_argument('--init', default=False, action='store_true', help='initialization of network to PyTorch 0.4')
# what we do in this code
parser.add_argument('--val-only', default=False, action='store_true', help='if we run only validation set')
parser.add_argument('--onlyrot', default=False, action='store_true', help='if train only RotNet')
# housekeeping
parser.add_argument('--data', default="Imagenet", type=str)
parser.add_argument('--device', default="0", type=str, metavar='N', help='GPU device')
parser.add_argument('--exp', default='./rot-retrain', metavar='DIR', help='path to result dirs')
parser.add_argument('--workers', default=6, type=int, metavar='N', help='number workers (default: 6)')
parser.add_argument('--imagenet-path', default='/home/ubuntu/data/imagenet', type=str, help='')
parser.add_argument('--comment', default='rot-retrain', type=str, help='comment for tensorboardX')
parser.add_argument('--log-interval', default=1, type=int, metavar='INT', help='save stuff every x epochs')
parser.add_argument('--logiter', default=200, type=int, metavar='INT', help='log every x-th batch')
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
name = "%s" % args.comment.replace('/', '_')
try:
args.device = [int(item) for item in args.device.split(',')]
except AttributeError:
args.device = [int(args.device)]
setup_runtime(seed=42, cuda_dev_id=args.device)
print(args, flush=True)
print()
print(name,flush=True)
writer = SummaryWriter('./runs/%s/%s'%(args.data,name))
writer.add_text('args', " \n".join(['%s %s' % (arg, getattr(args, arg)) for arg in vars(args)]))
# Setup model and train_loader
print('Commencing!', flush=True)
model, train_loader = return_model_loader(args)
train_loader = RotationDataLoader(args.imagenet_path, is_validation=False,
crop_size=224, batch_size=args.batch_size, num_workers=args.workers,
shuffle=True)
# add additional head to the network for RotNet loss.
if args.arch == 'alexnet':
if args.hc == 1:
model.__setattr__("top_layer0", nn.Linear(4096, args.ncl))
model.top_layer = None
model.headcount = args.hc+1
model.__setattr__("top_layer%s" % args.hc, nn.Linear(4096, 4))
else:
if args.hc == 1:
model.__setattr__("top_layer0", nn.Linear(2048*int(args.archspec), args.ncl))
model.top_layer = None
model.headcount = args.hc+1
model.__setattr__("top_layer%s" % args.hc, nn.Linear(2048*int(args.archspec), 4))
if args.init:
for mod in model.modules():
mod.apply(weight_init)
# Setup optimizer
o = Optimizer()
o.writer = writer
o.lr = args.lr
o.num_epochs = args.epochs
o.resume = True
o.log_interval = args.log_interval
o.checkpoint_dir = os.path.join(args.exp, 'checkpoints')
# Optimize
o.optimize(model, train_loader)
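
# Illustrative, standalone sketch of the LR schedule defined in Optimizer.__init__ above
# (the defaults --lr 0.05 and --lrdrop 30 are assumed): the rate drops by 10x every
# `lrdrop` epochs and is pinned to lr * 1e-3 from epoch 80 onward.
def _sketch_lr_schedule(epoch, lr=0.05, lrdrop=30):
    return (lr * (0.1 ** (epoch // lrdrop))) * (epoch < 80) + (epoch >= 80) * lr * (0.1 ** 3)
# e.g. epochs (0, 30, 60, 80) -> 0.05, 0.005, 0.0005, 5e-05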
|
4273
|
from aiohttp_admin2.mappers import Mapper
from aiohttp_admin2.mappers import fields
class FloatMapper(Mapper):
field = fields.FloatField()
def test_correct_float_type():
"""
    In this test we check successful conversion to the float type.
"""
mapper = FloatMapper({"field": 1})
mapper.is_valid()
assert mapper.data["field"] == 1.0
mapper = FloatMapper({"field": 2})
mapper.is_valid()
assert mapper.data["field"] == 2.0
mapper = FloatMapper({"field": -3})
mapper.is_valid()
assert mapper.data["field"] == -3.0
mapper = FloatMapper({"field": 0})
mapper.is_valid()
assert mapper.data["field"] == 0.0
def test_wrong_float_type():
"""
    In this test we check the error when we receive a wrong float type.
"""
assert FloatMapper({"field": "string"}).is_valid() is False
assert FloatMapper({"field": []}).is_valid() is False
|
4308
|
import pathlib
import yaml
documentations = {"Our Platform": "QuantConnect-Platform-2.0.0.yaml",
"Alpha Streams": "QuantConnect-Alpha-0.8.yaml"}
def RequestTable(api_call, params):
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{api_call}</code> Method</th>\n</tr>\n</thead>'
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n{\n'
for item in params:
example_ = "/"
description_ = "Optional. " if "required" not in item or not item["required"] else ""
description_ += item["description"]
if description_[-1] != ".":
description_ += "."
if "type" in item["schema"]:
type_ = item["schema"]["type"]
else:
type_ = item["schema"]["$ref"].split("/")[-1]
if "minimum" in item["schema"]:
description_ += f' Minimum: {item["schema"]["minimum"]}'
example_ = item["schema"]["minimum"]
elif "maximum" in item["schema"]:
description_ += f' Maximum: {item["schema"]["maximum"]}'
example_ = item["schema"]["maximum"]
elif "default" in item["schema"]:
description_ += f' Default: {item["schema"]["default"]}'
example_ = item["schema"]["default"]
if type_ == "array":
array_obj = item["schema"]["items"]
if "$ref" in array_obj:
type_ = array_obj["$ref"].split("/")[-1] + " Array"
ref = array_obj["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
request_object_ = doc
for path in ref:
request_object_ = request_object_[path]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
example_, __, __ = ExampleWriting(request_object_properties_, [], 1)
if "type" in array_obj:
type_ = array_obj["type"] + " Array"
if "enum" in array_obj:
type_ = type_ + " Enum"
description_ += f' Options: {str(array_obj["enum"])}'
example_ = f'"{array_obj["enum"][0]}"'
if "Enum" not in type_:
if "string" in type_:
example_ = '"string"'
elif "number" in type_ or "integer" in type_:
example_ = '0'
elif "boolean" in type_:
example_ = 'true'
writeUp += f'\n<tr>\n<td width="20%">{item["name"]}</td> <td> <code>{type_}</code><br/>{description_}</td>\n</tr>'
example += f' "{item["name"]}": {example_},\n'
return writeUp + example + "\b}</pre>\n</div>\n</td>\n</tr>\n</table>"
def ResponseTable(requestBody):
writeUp = ""
array = False
order = 0
if "content" in requestBody:
component = requestBody["content"]["application/json"]["schema"]
if "$ref" in component:
component = component["$ref"].split("/")[1:]
elif "items" in component and "$ref" in component["items"]:
component = component["items"]["$ref"].split("/")[1:]
array = True
order += 1
else:
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2">{requestBody["description"]}</th>\n'
writeUp += '</tr>\n</thead>\n'
writeUp += f'<tr>\n<td width="20%">value</td> <td> <code>{component["items"]["type"]}</code> <br/>/</td>\n</tr>\n'
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += f'[\n "{component["items"]["example"]}"\n]'
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
return writeUp
else:
component = requestBody["$ref"].split("/")[1:]
item_list = [component]
i = 0
while i < len(item_list):
request_object = doc
for item in item_list[i]:
request_object = request_object[item]
if "items" in request_object and "oneOf" in request_object["items"]:
prop = request_object["items"]["oneOf"]
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n[\n ['
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{item}</code> Model - {request_object["description"]}</th>\n'
writeUp += '</tr>\n</thead>'
for y in prop:
path = y["$ref"].split("/")[1:]
name = path[-1]
enum = ""
item_list.append(path)
request_object = doc
for item in path:
request_object = request_object[item]
if "enum" in request_object:
enum = " Options: " + str(request_object["enum"])
description_ = request_object["description"]
if description_[-1] != ".":
description_ += "."
writeUp += f'\n<tr>\n<td width="20%">{name}</td> <td> <code>{request_object["type"]}</code> <br/> {description_ + enum}</td>\n</tr>\n'
if "example" in request_object:
text = request_object["example"]
elif "enum" in request_object:
text = '"' + request_object["enum"][0] + '"'
example += f'\n {text},'
example += '\b\n ]\n]'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
continue
elif "oneOf" in request_object:
for y in request_object["oneOf"]:
item_list.append(y["$ref"].split("/")[1:])
i += 1
continue
elif "properties" in request_object:
request_object_properties = request_object["properties"]
elif "content" in request_object:
item_list.append(request_object["content"]["application/json"]["schema"]["$ref"].split("/")[1:])
i += 1
continue
elif "type" in request_object and "properties" not in request_object:
request_object_properties = {item: request_object}
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
if "description" in request_object:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model - {request_object["description"]}</th>\n'
else:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model</th>\n'
writeUp += '</tr>\n</thead>\n'
example, html_property, item_list = ExampleWriting(request_object_properties, item_list, array, order)
if array:
array = False
order -= 1
for line in html_property:
writeUp += line
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
return writeUp
def ExampleWriting(request_object_properties, item_list, array=False, order=0):
tab = " " * order
if array:
example = "[\n {\n"
else:
example = "{\n"
line = []
for name, properties in request_object_properties.items():
type_ = properties["type"] if "type" in properties else "object"
description_ = properties["description"] if "description" in properties else "/"
if (example != "{\n" and not array) or (example != "[\n {\n" and array):
example += ",\n"
example_ = tab + f' "{name}": '
if type_ == "array":
example_ += '[\n'
if "type" in properties["items"]:
type_ = properties["items"]["type"] + " Array"
example_ += tab + f' "{properties["items"]["type"]}"'
elif "$ref" in properties["items"]:
ref = properties["items"]["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+2)
example_ += tab + " " * 2 + write_up
elif type_ == "object":
if "additionalProperties" in properties:
add_prop = properties["additionalProperties"]
if "type" in add_prop:
prop_type = add_prop["type"]
if "format" in prop_type:
type_ = prop_type + f'$({prop_type["format"]})' + " object"
if prop_type["format"] == "date-time":
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += "0"
else:
type_ = prop_type + " object"
example_ += f'"{prop_type}"'
elif "$ref" in add_prop:
ref = add_prop["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "$ref" in properties:
ref = properties["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "type" in request_object_:
properties = request_object_properties_ = request_object_
type_ = request_object_["type"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
elif type_ == "integer" or type_ == "number":
example_ += "0"
elif type_ == "boolean":
example_ += "true"
elif type_ == "string":
if "format" in properties:
type_ += f'(${properties["format"]})'
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += '"string"'
if description_[-1] != ".":
description_ += "."
if "enum" in properties:
type_ += " Enum"
description_ += f' Options : {properties["enum"]}'
if "string" in type_:
example_ = tab + f' "{name}": "{properties["enum"][0]}"'
else:
example_ = tab + f' "{name}": {properties["enum"][0]}'
if "example" in properties:
eg = properties["example"]
type_ += f'<br/><i><sub>example: {eg}</sub></i>'
if isinstance(eg, str):
eg = '"' + eg + '"'
example_ = tab + f' "{name}": {eg}'
if "Array" in type_:
example_ += "\n" + tab + " ]"
if order == 0 or array:
line.append(f'<tr>\n<td width="20%">{name}</td> <td> <code>{type_}</code> <br/> {description_}</td>\n</tr>\n')
example += example_
if not array:
return example + "\n" + tab + "}", line, item_list
return example + "\n" + tab + "}\n" + " " * (order-1) + "]", line, item_list
for section, source in documentations.items():
yaml_file = open(source)
doc = yaml.load(yaml_file, Loader=yaml.Loader)
paths = doc["paths"]
for api_call, result in paths.items():
j = 1
content = result["post"] if "post" in result else result["get"]
# Create path if not exist
destination_folder = pathlib.Path("/".join(content["tags"]))
destination_folder.mkdir(parents=True, exist_ok=True)
# Create Introduction part
with open(destination_folder / f'{j:02} Introduction.html', "w") as html_file:
html_file.write("<p>\n")
html_file.write(f"{content['summary']}\n")
html_file.write("</p>\n")
j += 1
# Create Description part if having one
if "description" in content:
with open(destination_folder / f'{j:02} Description.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'{content["description"]}\n')
html_file.write('</p>\n')
j += 1
# Create Request part
with open(destination_folder / f'{j:02} Request.html', "w") as html_file:
description_ = ""
if "parameters" in content:
writeUp = RequestTable(api_call, content["parameters"])
elif "requestBody" in content:
if "description" in content["requestBody"]:
description_ = str(content["requestBody"]["description"])
if description_[-1] != ".":
description_ += "."
description_ += " "
writeUp = ResponseTable(content["requestBody"])
else:
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="1"><code>{api_call}</code> Method</th>\n</tr>\n</thead>\n'
                writeUp += f'<tr>\n<td><code>{api_call}</code> method takes no parameters.</td>\n</tr>\n</table>'
description_ += f'The <code>{api_call}</code> API accepts requests in the following format:\n'
html_file.write("<p>\n" + description_ + "</p>\n")
html_file.write(writeUp)
j += 1
# Create Response part
with open(destination_folder / f'{j:02} Responses.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'The <code>{api_call}</code> API provides a response in the following format:\n')
html_file.write('</p>\n')
request_body = content["responses"]
for code, properties in request_body.items():
if code == "200":
html_file.write('<h4>200 Success</h4>\n')
elif code == "401":
html_file.write('<h4>401 Authentication Error</h4>\n<table class="table qc-table">\n<thead>\n<tr>\n')
html_file.write('<th colspan="2"><code>UnauthorizedError</code> Model - Unauthorized response from the API. Key is missing, invalid, or timestamp is too old for hash.</th>\n')
html_file.write('</tr>\n</thead>\n<tr>\n<td width="20%">www_authenticate</td> <td> <code>string</code> <br/> Header</td>\n</tr>\n</table>\n')
continue
elif code == "404":
html_file.write('<h4>404 Not Found Error</h4>\n')
html_file.write('<p>The requested item, index, page was not found.</p>\n')
continue
elif code == "default":
html_file.write('<h4>Default Generic Error</h4>\n')
writeUp = ResponseTable(properties)
html_file.write(writeUp)
print(f"Documentation of {section} is generated and inplace!")
|
4310
|
from django.conf.urls import url
# from .views import BaseIndexView
urlpatterns = [
# url(r'^$', BaseIndexView.as_view(), name="index"),
]
|
4312
|
import glob
import logging
import os
import warnings
import pytest
from _pytest.outcomes import Failed
from _pytest.reports import TestReport
from .broker_pact import BrokerPact, BrokerPacts, PactBrokerConfig
from .result import PytestResult, log
def pytest_addoption(parser):
group = parser.getgroup("pact specific options (pactman)")
group.addoption(
"--pact-files", default=None, help="pact JSON files to verify (wildcards allowed)"
)
group.addoption("--pact-broker-url", default="", help="pact broker URL")
group.addoption("--pact-broker-token", default="", help="pact broker bearer token")
group.addoption(
"--pact-provider-name", default=None, help="pact name of provider being verified"
)
group.addoption(
"--pact-consumer-name",
default=None,
help="consumer name to limit pact verification to - "
"DEPRECATED, use --pact-verify-consumer instead",
)
group.addoption(
"--pact-verify-consumer", default=None, help="consumer name to limit pact verification to"
)
group.addoption(
"--pact-verify-consumer-tag",
metavar="TAG",
action="append",
help="limit broker pacts verified to those matching the tag. May be "
"specified multiple times in which case pacts matching any of these "
"tags will be verified.",
)
group.addoption(
"--pact-publish-results",
action="store_true",
default=False,
help="report pact verification results to pact broker",
)
group.addoption(
"--pact-provider-version",
default=None,
help="provider version to use when reporting pact results to pact broker",
)
group.addoption(
"--pact-allow-fail",
default=False,
action="store_true",
help="do not fail the pytest run if any pacts fail verification",
)
# Future options to be implemented. Listing them here so naming consistency can be a thing.
# group.addoption("--pact-publish-pacts", action="store_true", default=False,
# help="publish pacts to pact broker")
# group.addoption("--pact-consumer-version", default=None,
# help="consumer version to use when publishing pacts to the broker")
# group.addoption("--pact-consumer-version-source", default=None,
# help="generate consumer version from source 'git-tag' or 'git-hash'")
# group.addoption("--pact-consumer-version-tag", metavar='TAG', action="append",
# help="tag(s) that should be applied to the consumer version when pacts "
# "are uploaded to the broker; multiple tags may be supplied")
def get_broker_url(config):
return config.getoption("pact_broker_url") or os.environ.get("PACT_BROKER_URL")
def get_provider_name(config):
return config.getoption("pact_provider_name") or os.environ.get("PACT_PROVIDER_NAME")
# add the pact broker URL to the pytest output if running verbose
def pytest_report_header(config):
if config.getoption("verbose") > 0:
location = get_broker_url(config) or config.getoption("pact_files")
return [f"Loading pacts from {location}"]
def pytest_configure(config):
logging.getLogger("pactman").handlers = []
logging.basicConfig(format="%(message)s")
verbosity = config.getoption("verbose")
if verbosity > 0:
log.setLevel(logging.DEBUG)
class PytestPactVerifier:
def __init__(self, publish_results, provider_version, interaction, consumer):
self.publish_results = publish_results
self.provider_version = provider_version
self.interaction = interaction
self.consumer = consumer
def verify(self, provider_url, provider_setup, extra_provider_headers={}):
try:
self.interaction.verify_with_callable_setup(provider_url, provider_setup, extra_provider_headers)
except (Failed, AssertionError) as e:
raise Failed(str(e)) from None
def finish(self):
if self.consumer and self.publish_results and self.provider_version:
self.consumer.publish_result(self.provider_version)
def flatten_pacts(pacts):
for consumer in pacts:
last = consumer.interactions[-1]
for interaction in consumer.interactions:
if interaction is last:
yield (interaction, consumer)
else:
yield (interaction, None)
def load_pact_files(file_location):
for filename in glob.glob(file_location, recursive=True):
yield BrokerPact.load_file(filename, result_factory=PytestResult)
def test_id(identifier):
interaction, _ = identifier
return str(interaction)
def pytest_generate_tests(metafunc):
if "pact_verifier" in metafunc.fixturenames:
broker_url = get_broker_url(metafunc.config)
if not broker_url:
pact_files_location = metafunc.config.getoption("pact_files")
if not pact_files_location:
raise ValueError("need a --pact-broker-url or --pact-files option")
pact_files = load_pact_files(pact_files_location)
metafunc.parametrize(
"pact_verifier", flatten_pacts(pact_files), ids=test_id, indirect=True
)
else:
provider_name = get_provider_name(metafunc.config)
if not provider_name:
raise ValueError("--pact-broker-url requires the --pact-provider-name option")
broker = PactBrokerConfig(
broker_url,
metafunc.config.getoption("pact_broker_token"),
metafunc.config.getoption("pact_verify_consumer_tag", []),
)
broker_pacts = BrokerPacts(
provider_name, pact_broker=broker, result_factory=PytestResult
)
pacts = broker_pacts.consumers()
filter_consumer_name = metafunc.config.getoption("pact_verify_consumer")
if not filter_consumer_name:
filter_consumer_name = metafunc.config.getoption("pact_consumer_name")
if filter_consumer_name:
warnings.warn(
"The --pact-consumer-name command-line option is deprecated "
"and will be removed in the 3.0.0 release.",
DeprecationWarning,
)
if filter_consumer_name:
pacts = [pact for pact in pacts if pact.consumer == filter_consumer_name]
metafunc.parametrize("pact_verifier", flatten_pacts(pacts), ids=test_id, indirect=True)
class PactTestReport(TestReport):
"""Custom TestReport that allows us to attach an interaction to the result, and
    then display the interaction's verification result output as well as the traceback
of the failure.
"""
@classmethod
def from_item_and_call(cls, item, call, interaction):
report = super().from_item_and_call(item, call)
report.pact_interaction = interaction
# the toterminal() call can't reasonably get at this config, so we store it here
report.verbosity = item.config.option.verbose
return report
def toterminal(self, out):
out.line("Pact failure details:", bold=True)
for text, kw in self.pact_interaction.result.results_for_terminal():
out.line(text, **kw)
if self.verbosity > 0:
out.line("Traceback:", bold=True)
return super().toterminal(out)
else:
out.line("Traceback not shown, use pytest -v to show it")
def pytest_runtest_makereport(item, call):
if call.when != "call" or "pact_verifier" not in getattr(item, "fixturenames", []):
return
# use our custom TestReport subclass if we're reporting on a pact verification call
interaction = item.funcargs["pact_verifier"].interaction
report = PactTestReport.from_item_and_call(item, call, interaction)
if report.failed and item.config.getoption("pact_allow_fail"):
# convert the fail into an "expected" fail, which allows the run to pass
report.wasxfail = True
report.outcome = "passed"
return report
def pytest_report_teststatus(report, config):
if not hasattr(report, "pact_interaction"):
return
if hasattr(report, "wasxfail"):
# wasxfail usually displays an "X" but since it's not *expected* to fail an "f" is a little clearer
return "ignore fail", "f", "IGNORE_FAIL"
@pytest.fixture()
def pact_verifier(pytestconfig, request):
interaction, consumer = request.param
p = PytestPactVerifier(
pytestconfig.getoption("pact_publish_results"),
pytestconfig.getoption("pact_provider_version"),
interaction,
consumer,
)
yield p
p.finish()
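
# Hypothetical provider-side usage sketch (the provider URL and the setup callable are
# assumptions, not part of this plugin): a test that consumes the `pact_verifier`
# fixture defined above.
def _example_provider_setup(state_name):
    # Put the provider into the state named by the pact interaction.
    pass

def test_pacts_example(pact_verifier):
    pact_verifier.verify("http://localhost:5000", _example_provider_setup)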
|
4337
|
import logging
import os
import re
import uuid
from pathlib import Path
from ludwig.constants import CHECKSUM, META, TEST, TRAINING, VALIDATION
from ludwig.data.cache.util import calculate_checksum
from ludwig.utils import data_utils
from ludwig.utils.fs_utils import delete, path_exists
logger = logging.getLogger(__name__)
def alphanum(v):
"""Filters a string to only its alphanumeric characters."""
return re.sub(r"\W+", "", v)
class DatasetCache:
def __init__(self, config, checksum, cache_map, dataset_manager):
self.config = config
self.checksum = checksum
self.cache_map = cache_map
self.dataset_manager = dataset_manager
def get(self):
training_set_metadata_fp = self.cache_map[META]
if not path_exists(training_set_metadata_fp):
return None
cache_training_set_metadata = data_utils.load_json(training_set_metadata_fp)
cached_training_set = self.cache_map[TRAINING] if path_exists(self.cache_map[TRAINING]) else None
cached_test_set = self.cache_map[TEST] if path_exists(self.cache_map[TEST]) else None
cached_validation_set = self.cache_map[VALIDATION] if path_exists(self.cache_map[VALIDATION]) else None
valid = self.checksum == cache_training_set_metadata.get(CHECKSUM) and cached_training_set is not None
return valid, cache_training_set_metadata, cached_training_set, cached_test_set, cached_validation_set
def put(self, training_set, test_set, validation_set, training_set_metadata):
logger.info("Writing preprocessed training set cache")
training_set = self.dataset_manager.save(
self.cache_map[TRAINING],
training_set,
self.config,
training_set_metadata,
TRAINING,
)
if test_set is not None:
logger.info("Writing preprocessed test set cache")
test_set = self.dataset_manager.save(
self.cache_map[TEST],
test_set,
self.config,
training_set_metadata,
TEST,
)
if validation_set is not None:
logger.info("Writing preprocessed validation set cache")
validation_set = self.dataset_manager.save(
self.cache_map[VALIDATION],
validation_set,
self.config,
training_set_metadata,
VALIDATION,
)
logger.info("Writing train set metadata")
data_utils.save_json(self.cache_map[META], training_set_metadata)
return training_set, test_set, validation_set, training_set_metadata
def delete(self):
for fname in self.cache_map.values():
if path_exists(fname):
delete(fname)
class CacheManager:
def __init__(self, dataset_manager, cache_dir=None):
self._dataset_manager = dataset_manager
self._cache_dir = cache_dir
def get_dataset_cache(self, config, dataset=None, training_set=None, test_set=None, validation_set=None):
if dataset is not None:
key = self.get_cache_key(dataset, config)
cache_map = {
META: self.get_cache_path(dataset, key, META, "json"),
TRAINING: self.get_cache_path(dataset, key, TRAINING),
TEST: self.get_cache_path(dataset, key, TEST),
VALIDATION: self.get_cache_path(dataset, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
else:
key = self.get_cache_key(training_set, config)
cache_map = {
META: self.get_cache_path(training_set, key, META, "json"),
TRAINING: self.get_cache_path(training_set, key, TRAINING),
TEST: self.get_cache_path(test_set, key, TEST),
VALIDATION: self.get_cache_path(validation_set, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
def get_cache_key(self, dataset, config):
if not isinstance(dataset, str):
# TODO(travis): could try hashing the in-memory dataset, but this is tricky for Dask
return str(uuid.uuid1())
return calculate_checksum(dataset, config)
def get_cache_path(self, dataset, key, tag, ext=None):
if not isinstance(dataset, str):
dataset = None
if self._cache_dir is None and dataset is not None:
# Use the input dataset filename (minus the extension) as the cache path
stem = Path(dataset).stem
else:
# To avoid collisions across different directories, we use the unique checksum
# as the cache path
stem = alphanum(key)
ext = ext or self.data_format
cache_fname = f"{stem}.{tag}.{ext}"
return os.path.join(self.get_cache_directory(dataset), cache_fname)
def get_cache_directory(self, input_fname):
if self._cache_dir is None:
if input_fname is not None:
return os.path.dirname(input_fname)
return "."
return self._cache_dir
def can_cache(self, skip_save_processed_input):
return self._dataset_manager.can_cache(skip_save_processed_input)
@property
def data_format(self):
return self._dataset_manager.data_format
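
# Illustrative sketch (the dummy dataset manager and paths are hypothetical): how cache
# file names are derived. With no explicit cache_dir, the cache sits next to the input
# dataset and is named "<dataset stem>.<tag>.<data_format>".
if __name__ == "__main__":

    class _DummyDatasetManager:
        data_format = "hdf5"

        def can_cache(self, skip_save_processed_input):
            return True

    manager = CacheManager(_DummyDatasetManager())
    print(manager.get_cache_path("data/train.csv", "abc123", TRAINING))
    # -> e.g. data/train.training.hdf5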
|
4351
|
from guillotina.contrib.workflows.interfaces import IWorkflowChangedEvent
from guillotina.events import ObjectEvent
from zope.interface import implementer
@implementer(IWorkflowChangedEvent)
class WorkflowChangedEvent(ObjectEvent):
"""An object has been moved"""
def __init__(self, object, workflow, action, comments):
ObjectEvent.__init__(self, object)
self.object = object
self.workflow = workflow
self.action = action
self.comments = comments
|
4353
|
import pydbhub
from typing import Any, Dict, List, Tuple
from json.decoder import JSONDecodeError
import requests
import io
def send_request_json(query_url: str, data: Dict[str, Any]) -> Tuple[List[Any], str]:
"""
send_request_json sends a request to DBHub.io, formatting the returned result as JSON
Parameters
----------
query_url : str
url of the API endpoint
data : Dict[str, Any]
        data to be sent to the server.
Returns
-------
Tuple[List[Any], str]
The returned data is
        - a list of JSON objects
        - a string describing the error if one occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
response = requests.post(query_url, data=data, headers=headers)
response.raise_for_status()
return response.json(), None
except JSONDecodeError as e:
return None, e.args[0]
except TypeError as e:
return None, e.args[0]
except requests.exceptions.HTTPError as e:
try:
return response.json(), e.args[0]
except JSONDecodeError:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
return None, str(cause.args[0])
def send_request(query_url: str, data: Dict[str, Any]) -> Tuple[List[bytes], str]:
"""
send_request sends a request to DBHub.io.
Parameters
    ----------
    query_url : str
        url of the API endpoint
    data : Dict[str, Any]
        data to be sent to the server.
Returns
-------
    Tuple[List[bytes], str]
        The returned data is
        - the database file as a list of bytes
        - a string describing the error if one occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
response = requests.post(query_url, data=data, headers=headers)
response.raise_for_status()
return response.content, None
except requests.exceptions.HTTPError as e:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
return None, str(cause.args[0])
def send_upload(query_url: str, data: Dict[str, Any], db_bytes: io.BufferedReader) -> Tuple[List[Any], str]:
"""
send_upload uploads a database to DBHub.io.
Parameters
----------
query_url : str
url of the API endpoint.
data : Dict[str, Any]
        data to be sent to the server.
db_bytes : io.BufferedReader
A buffered binary stream of the database file.
Returns
-------
Tuple[List[Any], str]
The returned data is
        - a list of JSON objects
        - a string describing the error if one occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
files = {"file": db_bytes}
response = requests.post(query_url, data=data, headers=headers, files=files)
response.raise_for_status()
if response.status_code != 201:
# The returned status code indicates something went wrong
try:
return response.json(), str(response.status_code)
except JSONDecodeError:
return None, str(response.status_code)
return response.json(), None
except requests.exceptions.HTTPError as e:
try:
return response.json(), e.args[0]
except JSONDecodeError:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
        return None, str(cause.args[0])
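# Illustrative usage sketch (not part of the original module). The endpoint URL and
# payload keys below are assumptions for demonstration only; consult the DBHub.io
# API documentation for the actual values.
#
#   data = {"apikey": "<KEY>", "dbowner": "someone", "dbname": "example.sqlite",
#           "sql": "<base64-encoded query>"}
#   rows, err = send_request_json("https://api.dbhub.io/v1/query", data)
#   if err is not None:
#       print("request failed:", err)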
|
4374
|
from __future__ import print_function
import time
import weeutil.weeutil
import weewx.manager
import weewx.xtypes
archive_sqlite = {'database_name': '/home/weewx/archive/weepwr.sdb', 'driver': 'weedb.sqlite'}
archive_mysql = {'database_name': 'weewx', 'user': 'weewx', 'password': '<PASSWORD>', 'driver': 'weedb.mysql'}
sql_str = "SELECT %s(%s), MIN(usUnits), MAX(usUnits) FROM %s " \
"WHERE dateTime > ? AND dateTime <= ?" % ('avg', 'outTemp', 'archive')
timespan = weeutil.weeutil.TimeSpan(1573245000, 1573246800)
timespan = weeutil.weeutil.TimeSpan(1573245000, 1573245000 + 600)
print('timespan=', timespan)
with weewx.manager.Manager.open(archive_sqlite) as db_manager:
interpolate_dict = {
'aggregate_type': 'diff',
'obs_type': 'ch8_a_energy2',
'table_name': db_manager.table_name,
'start': timespan.start,
'stop': timespan.stop,
}
SQL_TEMPLATE = "SELECT (ch8_a_energy2 - (SELECT ch8_a_energy2 FROM archive WHERE dateTime=%(start)s)) / (%(stop)s - %(start)s) FROM archive WHERE dateTime=%(stop)s;"
SQL_TEMPLATE = """Select a.dateTime as StartTime
, b.dateTime as EndTime
, b.dateTime-a.dateTime as TimeChange
, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a
Inner Join archive b ON b.dateTime>=1573245000 AND b.dateTime<=(1573245000 + 600)"""
SQL_TEMPLATE = """Select a.dateTime as StartTime, b.datetime as EndTime, b.dateTime-a.dateTime as TimeChange, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a, archive b WHERE b.dateTime = (Select MAX(c.dateTime) FROM archive c WHERE c.dateTime<=(1573245000+600)) AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime>=1573245000);"""
SQL_TEMPLATE = """Select a.dateTime as StartTime, b.datetime as EndTime, b.dateTime-a.dateTime as TimeChange, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a, archive b WHERE b.dateTime = (Select MAX(dateTime) FROM archive WHERE dateTime<=(1573245000+600)) AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime>=1573245000);"""
SQL_TEMPLATE = "SELECT (b.%(obs_type)s - a.%(obs_type)s) / (b.dateTime-a.dateTime) "\
"FROM archive a, archive b "\
"WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive WHERE dateTime <= %(stop)s) "\
"AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime >= %(start)s);"
sql_stmt = SQL_TEMPLATE % interpolate_dict
print(sql_stmt)
# Get the number of records
with db_manager.connection.cursor() as cursor:
for row in cursor.execute(sql_stmt):
print(row)
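# Illustrative result (not part of the original script): with the interpolate_dict
# above (start=1573245000, stop=1573245600, obs_type='ch8_a_energy2'), the final
# SQL_TEMPLATE renders roughly to:
#   SELECT (b.ch8_a_energy2 - a.ch8_a_energy2) / (b.dateTime-a.dateTime) FROM archive a, archive b
#   WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive WHERE dateTime <= 1573245600)
#   AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime >= 1573245000);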
|
4406
|
from django.db.models.fields.files import (FieldFile, ImageField,
ImageFileDescriptor)
from django.utils.translation import ugettext as _
from .backends import get_backend_class
from .files import VideoFile
class VideoFileDescriptor(ImageFileDescriptor):
pass
class VideoFieldFile(VideoFile, FieldFile):
def delete(self, save=True):
# Clear the video info cache
if hasattr(self, '_info_cache'):
del self._info_cache
super(VideoFieldFile, self).delete(save=save)
class VideoField(ImageField):
attr_class = VideoFieldFile
descriptor_class = VideoFileDescriptor
description = _("Video")
def __init__(self, verbose_name=None, name=None, duration_field=None,
**kwargs):
self.duration_field = duration_field
super(VideoField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_backend())
return errors
def _check_backend(self):
backend = get_backend_class()
return backend.check()
def to_python(self, data):
# use FileField method
return super(ImageField, self).to_python(data)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
_file = getattr(instance, self.attname)
# we need a real file
if not _file._committed:
return
# write `width` and `height`
super(VideoField, self).update_dimension_fields(instance, force,
*args, **kwargs)
if not self.duration_field:
return
# Nothing to update if we have no file and not being forced to update.
if not _file and not force:
return
if getattr(instance, self.duration_field) and not force:
return
# get duration if file is defined
duration = _file.duration if _file else None
# update duration
setattr(instance, self.duration_field, duration)
def formfield(self, **kwargs):
# use normal FileFieldWidget for now
return super(ImageField, self).formfield(**kwargs)
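# Illustrative usage sketch (not part of the original module). The model and field
# names are made up; it only shows how `duration_field` is expected to be wired so
# that update_dimension_fields() can populate the duration.
#
#   class Clip(models.Model):
#       video = VideoField(upload_to="clips/", duration_field="duration")
#       duration = models.FloatField(null=True, blank=True)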
|
4420
|
import math
import os
from copy import deepcopy
from ast import literal_eval
import pandas as pd
from math import factorial
import random
from collections import Counter, defaultdict
import sys
from nltk import word_tokenize
from tqdm import tqdm, trange
import argparse
import numpy as np
import re
import csv
from sklearn.model_selection import train_test_split
from swda.swda import CorpusReader, Transcript, Utterance
act2word = {1:"inform",2:"question", 3:"directive", 4:"commissive"}
def permute(sents, sent_DAs, amount):
""" return a list of different! permuted sentences and their respective dialog acts """
""" if amount is greater than the possible amount of permutations, only the uniquely possible ones are returned """
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
if amount == 0:
return []
permutations = [list(range(len(sents)))]
amount = min(amount, factorial(len(sents))-1)
for i in range(amount):
permutation = np.random.permutation(len(sents))
while permutation.tolist() in permutations:
permutation = np.random.permutation(len(sents))
permutations.append(permutation.tolist())
    return permutations[1:]  # the first one is the original; it was included so that it won't be generated again
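# Illustrative example (made-up values, not part of the original code): for three
# sentences and amount=2, permute() returns index permutations of [0, 1, 2] that
# differ from the original order, e.g. [[2, 0, 1], [1, 2, 0]].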
def draw_rand_sent(act_utt_df, sent_len, amount):
""" df is supposed to be a pandas dataframe with colums 'act' and 'utt' (utterance),
with act being a number from 1 to 4 and utt being a sentence """
permutations = []
for _ in range(amount):
(utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df)
sent_insert_ix = random.randint(0, sent_len-1)
permutations.append((utt, da, name, ix, sent_insert_ix))
return permutations
def draw_rand_sent_from_df(df):
ix = random.randint(0, len(df['utt'])-1)
return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix]
def half_perturb(sents, sent_DAs, amount):
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
permutations = [list(range(len(sents)))]
for _ in range(amount):
while True:
speaker = random.randint(0,1) # choose one of the speakers
speaker_ix = list(filter(lambda x: (x-speaker) % 2 == 0, range(len(sents))))
permuted_speaker_ix = np.random.permutation(speaker_ix)
new_sents = list(range(len(sents)))
for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix):
new_sents[i_to] = i_from
if (not new_sents == permutations[0]) and (
not new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))):
permutations.append(new_sents)
break
return permutations[1:]
def utterance_insertions(length, amount):
possible_permutations = []
original = list(range(length))
for ix in original:
for y in range(length):
if ix == y: continue
ix_removed = original[0:ix] + ([] if ix == length-1 else original[ix+1:])
ix_removed.insert(y, ix)
possible_permutations.append(deepcopy(ix_removed))
permutations = []
for _ in range(amount):
i = random.randint(0, len(possible_permutations)-1)
permutations.append(possible_permutations[i])
return permutations
class DailyDialogConverter:
def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True):
self.data_dir = data_dir
self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')
self.tokenizer = tokenizer
self.word2id = word2id
self.output_file = None
self.task = task
self.ranking_dataset = ranking_dataset
self.perturbation_statistics = 0
self.setname = os.path.split(data_dir)[1]
assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', "wrong data dir name"
def create_act_utt(self):
dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
        output_file = os.path.join(self.data_dir, 'act_utt_name.txt')
df = open(dial_file, 'r')
af = open(act_file, 'r')
of = open(output_file, 'w')
csv_writer = csv.writer(of, delimiter='|')
for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
seqs = dial.split('__eou__')
seqs = seqs[:-1]
if len(seqs) < 5:
continue
tok_seqs = [self.tokenizer(seq) for seq in seqs]
tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
tok_seqs = [self.word2id(seq) for seq in tok_seqs]
acts = act.split(' ')
acts = acts[:-1]
acts = [int(act) for act in acts]
for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):
dialog_name = "{}_{}".format(self.setname, line_count)
row = (act, utt, dialog_name,utt_i)
csv_writer.writerow(row)
def convert_dset(self, amounts):
# data_dir is supposed to be the dir with the respective train/test/val-dataset files
print("Creating {} perturbations for task {}".format(amounts, self.task))
dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task))
root_data_dir = os.path.split(self.data_dir)[0]
shuffled_path = os.path.join(root_data_dir, "shuffled_{}".format(self.task))
if not os.path.isdir(shuffled_path):
os.mkdir(shuffled_path)
assert os.path.isfile(dial_file) and os.path.isfile(act_file), "could not find input files"
        assert os.path.isfile(self.act_utt_file), "missing act_utt_name.txt in data_dir"
with open(self.act_utt_file, 'r') as f:
act_utt_df = pd.read_csv(f, sep='|', names=['act','utt','dialogue','ix'])
rand_generator = lambda: draw_rand_sent_from_df(act_utt_df)
df = open(dial_file, 'r')
af = open(act_file, 'r')
of = open(self.output_file, 'w')
discarded = 0
for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
seqs = dial.split('__eou__')
seqs = seqs[:-1]
if len(seqs) < 5:
discarded += 1
continue
tok_seqs = [self.tokenizer(seq) for seq in seqs]
tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
tok_seqs = [self.word2id(seq) for seq in tok_seqs]
acts = act.split(' ')
acts = acts[:-1]
acts = [int(act) for act in acts]
if self.task == 'up':
permuted_ixs = permute(tok_seqs, acts, amounts)
elif self.task == 'us':
permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts)
elif self.task == 'hup':
permuted_ixs = half_perturb(tok_seqs, acts, amounts)
elif self.task == 'ui':
permuted_ixs = utterance_insertions(len(tok_seqs), amounts)
shuffle_file = os.path.join(shuffled_path, "{}_{}.csv".format(self.setname, line_count))
with open(shuffle_file, "w") as f:
csv_writer = csv.writer(f)
for perm in permuted_ixs:
if self.task == 'us':
(utt, da, name, ix, insert_ix) = perm
row = [name, ix,insert_ix]
csv_writer.writerow(row)
else:
csv_writer.writerow(perm)
self.perturbation_statistics += len(permuted_ixs)
if self.task == 'us':
for p in permuted_ixs:
(insert_sent, insert_da, name, ix, insert_ix) = p
a = " ".join([str(a) for a in acts])
u = str(tok_seqs)
p_a = deepcopy(acts)
p_a[insert_ix] = insert_da
pa = " ".join([str(a) for a in p_a])
p_u = deepcopy(tok_seqs)
p_u[insert_ix] = self.word2id(insert_sent)
of.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
of.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
else:
for p in permuted_ixs:
a = " ".join([str(a) for a in acts])
u = str(tok_seqs)
pa = [acts[i] for i in p]
p_a = " ".join([str(a) for a in pa])
pu = [tok_seqs[i] for i in p]
p_u = str(pu)
of.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
of.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
print(discarded)
class SwitchboardConverter:
def __init__(self, data_dir, tokenizer, word2id, task='', seed=42):
self.corpus = CorpusReader(data_dir)
self.data_dir = data_dir
self.tokenizer = tokenizer
self.word2id = word2id
self.task = task
self.utt_num = 0
for utt in self.corpus.iter_utterances():
self.utt_num += 1
self.trans_num = 0
for trans in self.corpus.iter_transcripts():
self.trans_num += 1
self.da2num = switchboard_da_mapping()
# CAUTION: make sure that for each task the seed is the same s.t. the splits will be the same!
train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed)
val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed)
self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs
self.utt_da_pairs = []
prev_da = "%"
for i, utt in enumerate(self.corpus.iter_utterances()):
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.word2id(self.tokenizer(sentence))
act = utt.damsl_act_tag()
if act == None: act = "%"
if act == "+": act = prev_da
_, swda_name = os.path.split(utt.swda_filename)
swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name
ix = utt.utterance_index
self.utt_da_pairs.append((sentence, act, swda_name, ix))
def draw_rand_sent(self):
r = random.randint(0, len(self.utt_da_pairs)-1)
return self.utt_da_pairs[r]
def create_vocab(self):
print("Creating Vocab file for Switchboard")
cnt = Counter()
for utt in self.corpus.iter_utterances():
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.tokenizer(sentence)
for w in sentence:
cnt[w] += 1
itos_file = os.path.join(self.data_dir, "itos.txt")
itosf = open(itos_file, "w")
for (word, _) in cnt.most_common(25000):
itosf.write("{}\n".format(word))
#getKeysByValue
def swda_permute(self, sents, amount, speaker_ixs):
if amount == 0:
return []
permutations = [list(range(len(sents)))]
segment_permutations = []
amount = min(amount, factorial(len(sents))-1)
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
for i in range(amount):
while True:
permutation = []
segm_perm = np.random.permutation(len(segments))
segment_permutations.append(segm_perm)
for segm_ix in segm_perm:
utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
permutation = permutation + utt_ixs
if permutation not in permutations:
break
permutations.append(permutation)
        return permutations[1:], segment_permutations  # the first one is the original; it was included so that it won't be generated again
def speaker_segment_ixs(self, speaker_ixs):
i = 0
segment_indices = dict()
prev_speaker = speaker_ixs[0]
for j,speaker in enumerate(speaker_ixs):
if speaker != prev_speaker:
prev_speaker = speaker
i += 1
segment_indices[j] = i
return segment_indices
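    # Illustrative example (made-up values, not part of the original code):
    # speaker_ixs [0, 0, 1, 1, 0] is segmented into consecutive same-speaker runs,
    # so speaker_segment_ixs() returns {0: 0, 1: 0, 2: 1, 3: 1, 4: 2}.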
def swda_half_perturb(self, amount, speaker_ixs):
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
segment_permutations = []
permutations = [list(segm_ixs.keys())]
for _ in range(amount):
speaker = random.randint(0,1) # choose one of the speakers
speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments))
speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments))
#TODO: rename either speaker_ix or speaker_ixs, they are something different, but the names are too close
if len(speaker_to_perm) < 2:
return []
while True:
permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist()
new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix))
if speaker == 0 :
new_segments[::2] = permuted_speaker_ix
new_segments[1::2] = speaker_orig
else:
new_segments[1::2] = permuted_speaker_ix
new_segments[::2] = speaker_orig
segment_permutations.append(new_segments)
permutation = []
for segm_ix in new_segments:
utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
permutation = permutation + utt_ixs
if not permutation in permutations:
permutations.append(permutation)
break
return permutations[1:], segment_permutations
def swda_utterance_insertion(self, speaker_ixs, amounts):
segment_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segment_ixs.values()))
segment_permutations = []
permutations = []
i = 0
for _ in range(amounts):
while True: # actually: do ... while permutation not in permutations
i_from = random.randint(0, len(segments)-1)
i_to = random.randint(0, len(segments)-2)
segm_perm = deepcopy(segments)
rem_elem = segments[i_from]
segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:]
segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]
permutation = []
for segm_ix in segm_perm:
utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix))
permutation = permutation + utt_ixs
if permutation not in permutations:
permutations.append(permutation)
segment_permutations.append(segm_perm)
break
return permutations, segment_permutations
def swda_utterance_sampling(self, speaker_ixs, amount):
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
permutations = []
for i in range(amount):
(sentence, act, swda_name, ix) = self.draw_rand_sent()
insert_ix = random.choice(segments)
permutations.append((sentence, act, swda_name, ix, insert_ix))
return permutations
def convert_dset(self, amounts):
# create distinct train/validation/test files. they'll correspond to the created
# splits from the constructor
train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task))
val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task))
test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task))
if not os.path.exists(os.path.join(self.data_dir, 'train')):
os.makedirs(os.path.join(self.data_dir, 'train'))
if not os.path.exists(os.path.join(self.data_dir, 'validation')):
os.makedirs(os.path.join(self.data_dir, 'validation'))
if not os.path.exists(os.path.join(self.data_dir, 'test')):
os.makedirs(os.path.join(self.data_dir, 'test'))
trainfile = open(train_output_file, 'w')
valfile = open(val_output_file, 'w')
testfile = open(test_output_file, 'w')
shuffled_path = os.path.join(self.data_dir, "shuffled_{}".format(self.task))
if not os.path.isdir(shuffled_path):
os.mkdir(shuffled_path)
for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)):
utterances = []
acts = []
speaker_ixs = []
prev_act = "%"
for utt in trans.utterances:
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.word2id(self.tokenizer(sentence))
utterances.append(sentence)
act = utt.damsl_act_tag()
if act == None: act = "%"
if act == "+": act = prev_act
acts.append(self.da2num[act])
prev_act = act
if "A" in utt.caller:
speaker_ixs.append(0)
else:
speaker_ixs.append(1)
if self.task == 'up':
permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)
elif self.task == 'us':
permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts)
elif self.task == 'hup':
permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs)
elif self.task == 'ui':
permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts)
swda_fname = os.path.split(trans.swda_filename)[1]
shuffle_file = os.path.join(shuffled_path, swda_fname) # [:-4]
with open(shuffle_file, "w") as f:
csv_writer = csv.writer(f)
if self.task == 'us':
for perm in permuted_ixs:
(utt, da, name, ix, insert_ix) = perm
row = [name, ix,insert_ix]
csv_writer.writerow(row)
else:
for perm in segment_perms:
csv_writer.writerow(perm)
if self.task == 'us':
for p in permuted_ixs:
a = " ".join([str(x) for x in acts])
u = str(utterances)
insert_sent, insert_da, name, ix, insert_ix = p
insert_da = self.da2num[insert_da]
p_a = deepcopy(acts)
p_a[insert_ix] = insert_da
pa = " ".join([str(x) for x in p_a])
p_u = deepcopy(utterances)
p_u[insert_ix] = insert_sent
if i in self.train_ixs:
trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
trainfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
if i in self.val_ixs:
valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
valfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
if i in self.test_ixs:
testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
testfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
else:
for p in permuted_ixs:
a = " ".join([str(x) for x in acts])
u = str(utterances)
pa = [acts[i] for i in p]
p_a = " ".join([str(x) for x in pa])
pu = [utterances[i] for i in p]
p_u = str(pu)
if i in self.train_ixs:
trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
trainfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
if i in self.val_ixs:
valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
valfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
if i in self.test_ixs:
testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
testfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--datadir",
required=True,
type=str,
help="""The input directory where the files of the corpus
are located. """)
parser.add_argument("--corpus",
required=True,
type=str,
help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """)
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--amount',
type=int,
default=20,
help="random seed for initialization")
parser.add_argument('--word2id',
action='store_true',
help= "convert the words to ids")
parser.add_argument('--task',
required=True,
type=str,
default="up",
help="""for which task the dataset should be created.
alternatives: up (utterance permutation)
us (utterance sampling)
                                      hup (half utterance perturbation)
ui (utterance insertion, nothing directly added!)""")
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
if args.word2id:
f = open(os.path.join(args.datadir, "itos.txt"), "r")
word2id_dict = dict()
for i, word in enumerate(f):
word2id_dict[word[:-1].lower()] = i
        word2id = lambda x: [word2id_dict[y] for y in x]
    else:
        word2id = lambda x: x  # don't convert words to ids (yet); it gets done in the glove wrapper of mtl_coherence.py
tokenizer = word_tokenize
if args.corpus == 'DailyDialog':
converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)
converter.create_act_utt()
elif args.corpus == 'Switchboard':
converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)
converter.create_vocab()
converter.convert_dset(amounts=args.amount)
def getKeysByValue(dictOfElements, valueToFind):
listOfKeys = list()
for item in dictOfElements.items():
if item[1] == valueToFind:
listOfKeys.append(item[0])
return listOfKeys
def switchboard_da_mapping():
mapping_dict = dict({
"sd": 1,
"b": 2,
"sv": 3,
"aa": 4,
"%-": 5,
"ba": 6,
"qy": 7,
"x": 8,
"ny": 9,
"fc": 10,
"%": 11,
"qw": 12,
"nn": 13,
"bk": 14,
"h": 15,
"qy^d": 16,
"o": 17,
"bh": 18,
"^q": 19,
"bf": 20,
"na": 21,
"ny^e": 22,
"ad": 23,
"^2": 24,
"b^m": 25,
"qo": 26,
"qh": 27,
"^h": 28,
"ar": 29,
"ng": 30,
"nn^e": 31,
"br": 32,
"no": 33,
"fp": 34,
"qrr": 35,
"arp": 36,
"nd": 37,
"t3": 38,
"oo": 39,
"co": 40,
"cc": 41,
"t1": 42,
"bd": 43,
"aap": 44,
"am": 45,
"^g": 46,
"qw^d": 47,
"fa": 48,
"ft":49
})
d = defaultdict(lambda: 11)
for (k, v) in mapping_dict.items():
d[k] = v
return d
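# Illustrative example (not part of the original code): the returned defaultdict maps
# known DAMSL tags to ids and falls back to 11 (the "%" tag) for anything unseen,
# e.g. switchboard_da_mapping()["sd"] == 1 and switchboard_da_mapping()["zz"] == 11.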
if __name__ == "__main__":
main()
|
4442
|
import os
import sys
import unittest
from tests.tests_bin_class.test_performance import *
if __name__ == "__main__":
unittest.main()
|
4486
|
import string
import requests
import sys
import myparser
import re
class search_twitter:
def __init__(self, word, limit):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = "www.google.com"
self.hostname = "www.google.com"
self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7"
self.quantity = "100"
self.limit = int(limit)
self.counter = 0
def do_search(self):
try:
urly="https://"+ self.server + "/search?num=100&start=" + str(self.counter) + "&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20" + self.word
        except Exception as e:
            print(e)
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'}
try:
r=requests.get(urly,headers=headers)
        except Exception as e:
            print(e)
self.results = r.content
self.totalresults += self.results
def get_people(self):
rawres = myparser.parser(self.totalresults, self.word)
return rawres.people_twitter()
def process(self):
while (self.counter < self.limit):
self.do_search()
self.counter += 100
print "\tSearching " + str(self.counter) + " results.."
|
4493
|
def gen():
i = 0
while 1:
yield i
i += 1
g = gen()
try:
g.pend_throw
except AttributeError:
print("SKIP")
raise SystemExit
print(next(g))
print(next(g))
g.pend_throw(ValueError())
v = None
try:
v = next(g)
except Exception as e:
print("raised", repr(e))
print("ret was:", v)
# It's legal to pend exception in a just-started generator, just the same
# as it's legal to .throw() into it.
g = gen()
g.pend_throw(ValueError())
try:
next(g)
except ValueError:
print("ValueError from just-started gen")
|
4511
|
import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db, self._num_classes)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
# for key in blobs:
# if key != 'roidb':
# blobs[key] = blobs[key].squeeze(axis=0)
blobs['data'] = blobs['data'].squeeze(axis=0)
return blobs
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
and 2) cfg.TRAIN.SCALES containing SINGLE scale.
Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can
pad and batch images base on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
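# Illustrative example (made-up values, not part of the original code): with
# cfg.TRAIN.IMS_PER_BATCH == 2 and ratio_list == [0.5, 0.8, 1.2, 1.5], the first
# minibatch keeps the leftmost ratio and the second keeps the rightmost, giving
# ratio_list_minibatch == [0.5, 0.5, 1.5, 1.5].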
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
def __iter__(self):
rand_perm = npr.permutation(self.num_data)
ratio_list = self.ratio_list[rand_perm]
ratio_index = self.ratio_index[rand_perm]
# re-calculate minibatch ratio list
ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
def __len__(self):
return self.num_data
class BatchSampler(torch_sampler.BatchSampler):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx) # Difference: batch.append(int(idx))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def collate_minibatch(list_of_blobs):
"""Stack samples seperately and return a list of minibatches
A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
Hence, we need to stack smaples from each minibatch seperately.
"""
Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched into a tensor.
    # So we keep roidb as a "list of ndarray".
lists = []
for blobs in list_of_blobs:
lists.append({'data' : blobs.pop('data'),
'rois' : blobs.pop('rois'),
'labels' : blobs.pop('labels')})
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
minibatch = default_collate(mini_list)
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
|
4566
|
from .basic_controller import BasicMAC
from .cate_broadcast_comm_controller import CateBCommMAC
from .cate_broadcast_comm_controller_full import CateBCommFMAC
from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC
from .tar_comm_controller import TarCommMAC
from .cate_pruned_broadcast_comm_controller import CatePBCommMAC
REGISTRY = {"basic_mac": BasicMAC,
"cate_broadcast_comm_mac": CateBCommMAC,
"cate_broadcast_comm_mac_full": CateBCommFMAC,
"cate_broadcast_comm_mac_not_IB": CateBCommNIBMAC,
"tar_comm_mac": TarCommMAC,
"cate_pruned_broadcast_comm_mac": CatePBCommMAC}
|
4569
|
from ..base import BaseModel
# returned from https://vk.com/dev/account.getActiveOffers
class ActiveOffer(BaseModel):
id: str = None
title: str = None
instruction: str = None
instruction_html: str = None
short_description: str = None
description: str = None
img: str = None
tag: str = None
price: int = None
|
4571
|
import re
import numbers
import collections
import logging
from collections.abc import Iterable
import itertools
import aws_error_utils
from .lookup import Ids, lookup_accounts_for_ou
from .format import format_account_id
LOGGER = logging.getLogger(__name__)
_Context = collections.namedtuple("_Context", [
"session",
"ids",
"principal",
"principal_filter",
"permission_set",
"permission_set_filter",
"target",
"target_filter",
"get_principal_names",
"get_permission_set_names",
"get_target_names",
"ou_recursive",
"cache",
"filter_cache"
])
def _filter(filter_cache, key, func, args):
if not func:
return True
if key not in filter_cache:
filter_cache[key] = func(*args)
return filter_cache[key]
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
def _is_principal_tuple(principal):
try:
return all([
len(principal) == 2,
isinstance(principal[0], str),
principal[0] in ["GROUP", "USER"],
isinstance(principal[1], str),
])
except:
return False
def _process_principal(principal):
if not principal:
return None
if isinstance(principal, str):
return [(None, principal)]
if _is_principal_tuple(principal):
return [tuple(principal)]
else:
return _flatten(_process_principal(p) for p in principal)
def _process_permission_set(ids, permission_set):
if not permission_set:
return None
if not isinstance(permission_set, str) and isinstance(permission_set, Iterable):
return _flatten(_process_permission_set(ids, ps) for ps in permission_set)
if permission_set.startswith("arn"):
permission_set_arn = permission_set
elif permission_set.startswith("ssoins-") or permission_set.startswith("ins-"):
permission_set_arn = f"arn:aws:sso:::permissionSet/{permission_set}"
elif permission_set.startswith("ps-"):
permission_set_arn = f"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}"
else:
raise TypeError(f"Invalid permission set id {permission_set}")
return [permission_set_arn]
def _is_target_tuple(target):
try:
return all([
len(target) == 2,
isinstance(target[0], str),
target[0] in ["AWS_OU", "AWS_ACCOUNT"],
isinstance(target[1], str),
])
except:
return False
def _process_target(target):
if not target:
return None
if isinstance(target, numbers.Number):
return [("AWS_ACCOUNT", format_account_id(target))]
if isinstance(target, str):
if re.match(r"^\d+$", target):
return [("AWS_ACCOUNT", format_account_id(target))]
elif re.match(r"^r-[a-z0-9]{4,32}$", target) or re.match(r"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$", target):
return [("AWS_OU", target)]
else:
raise TypeError(f"Invalid target {target}")
elif _is_target_tuple(target):
target_type, target_id = target
if target_type not in ["AWS_ACCOUNT", "AWS_OU"]:
raise TypeError(f"Invalid target type {target_type}")
return [(target_type, target_id)]
else:
value = _flatten(_process_target(t) for t in target)
return value
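# Illustrative examples (made-up ids, not part of the original code) of how targets
# are normalized by _process_target():
#   _process_target("123456789012")        -> [("AWS_ACCOUNT", "123456789012")]
#   _process_target("ou-abcd-12345678")    -> [("AWS_OU", "ou-abcd-12345678")]
#   _process_target([123456789012, ("AWS_OU", "ou-abcd-12345678")])
#       -> [("AWS_ACCOUNT", "123456789012"), ("AWS_OU", "ou-abcd-12345678")]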
def _get_account_iterator(target, context: _Context):
def target_iterator():
target_name = None
if context.get_target_names:
organizations_client = context.session.client("organizations")
account = organizations_client.describe_account(AccountId=target[1])["Account"]
if account.get("Name"):
target_name = account["Name"]
value = (*target, target_name)
if not _filter(context.filter_cache, value[1], context.target_filter, value):
LOGGER.debug(f"Account is filtered: {value}")
else:
LOGGER.debug(f"Visiting single account: {value}")
yield value
return target_iterator
def _get_ou_iterator(target, context: _Context):
def target_iterator():
target_name = None
# if context.get_target_names:
# organizations_client = context.session.client("organizations")
# ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])["OrganizationalUnit"]
# if ou.get("Name"):
# target_name = ou("Name")
value = (*target, target_name)
accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive)
for account in accounts:
yield "AWS_ACCOUNT", account["Id"], account["Name"]
return target_iterator
def _get_single_target_iterator(target, context: _Context):
target_type = target[0]
if target_type == "AWS_ACCOUNT":
return _get_account_iterator(target, context)
elif target_type == "AWS_OU":
return _get_ou_iterator(target, context)
else:
raise TypeError(f"Invalid target type {target_type}")
def _get_all_accounts_iterator(context: _Context):
def target_iterator():
organizations_client = context.session.client("organizations")
accounts_paginator = organizations_client.get_paginator("list_accounts")
for response in accounts_paginator.paginate():
LOGGER.debug(f"ListAccounts page: {response}")
for account in response["Accounts"]:
account_id = account["Id"]
account_name = account["Name"]
value = ("AWS_ACCOUNT", account_id, account_name)
if not _filter(context.filter_cache, account_id, context.target_filter, value):
LOGGER.debug(f"Account is filtered: {value}")
continue
LOGGER.debug(f"Visiting account: {value}")
yield value
return target_iterator
def _get_target_iterator(context: _Context):
if context.target:
iterables = [_get_single_target_iterator(t, context) for t in context.target]
def target_iterator():
return itertools.chain(*[it() for it in iterables])
return target_iterator
else:
LOGGER.debug(f"Iterating for all accounts")
return _get_all_accounts_iterator(context)
def _get_single_permission_set_iterator(permission_set, context: _Context):
permission_set_arn = permission_set
permission_set_id = permission_set_arn.split("/")[-1]
def permission_set_iterator(target_type, target_id, target_name):
if not context.get_permission_set_names:
permission_set_name = None
else:
sso_admin_client = context.session.client("sso-admin")
response = sso_admin_client.describe_permission_set(
InstanceArn=context.ids.instance_arn,
PermissionSetArn=permission_set_arn
)
LOGGER.debug(f"DescribePermissionSet response: {response}")
permission_set_name = response["PermissionSet"]["Name"]
if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):
LOGGER.debug(f"Single permission set is filtered: {(permission_set_id, permission_set_name)}")
else:
LOGGER.debug(f"Visiting single permission set {(permission_set_id, permission_set_name)}")
yield permission_set_arn, permission_set_id, permission_set_name
return permission_set_iterator
def _get_all_permission_sets_iterator(context: _Context):
def permission_set_iterator(target_type, target_id, target_name):
if target_type != "AWS_ACCOUNT":
raise TypeError(f"Unsupported target type {target_type}")
sso_admin_client = context.session.client("sso-admin")
permission_sets_paginator = sso_admin_client.get_paginator("list_permission_sets_provisioned_to_account")
for response in permission_sets_paginator.paginate(
InstanceArn=context.ids.instance_arn,
AccountId=target_id):
LOGGER.debug(f"ListPermissionSetsProvisionedToAccount {target_id} page: {response}")
if "PermissionSets" not in response:
continue
for permission_set_arn in response["PermissionSets"]:
permission_set_id = permission_set_arn.split("/", 2)[-1]
if not context.get_permission_set_names:
permission_set_name = None
else:
if permission_set_arn not in context.cache:
response = sso_admin_client.describe_permission_set(
InstanceArn=context.ids.instance_arn,
PermissionSetArn=permission_set_arn
)
LOGGER.debug(f"DescribePermissionSet response: {response}")
context.cache[permission_set_arn] = response["PermissionSet"]["Name"]
permission_set_name = context.cache[permission_set_arn]
if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):
LOGGER.debug(f"Permission set is filtered: {(permission_set_id, permission_set_name)}")
continue
LOGGER.debug(f"Visiting permission set: {(permission_set_id, permission_set_name)}")
yield permission_set_arn, permission_set_id, permission_set_name
return permission_set_iterator
def _get_permission_set_iterator(context: _Context):
if context.permission_set:
iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set]
def permission_set_iterator(target_type, target_id, target_name):
return itertools.chain(*[it(target_type, target_id, target_name) for it in iterables])
return permission_set_iterator
else:
LOGGER.debug("Iterating for all permission sets")
return _get_all_permission_sets_iterator(context)
def _get_principal_iterator(context: _Context):
def principal_iterator(
target_type, target_id, target_name,
permission_set_arn, permission_set_id, permission_set_name):
if target_type != "AWS_ACCOUNT":
raise TypeError(f"Unsupported target type {target_type}")
sso_admin_client = context.session.client("sso-admin")
identity_store_client = context.session.client("identitystore")
assignments_paginator = sso_admin_client.get_paginator("list_account_assignments")
for response in assignments_paginator.paginate(
InstanceArn=context.ids.instance_arn,
AccountId=target_id,
PermissionSetArn=permission_set_arn):
LOGGER.debug(f"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}")
if not response["AccountAssignments"] and not "NextToken" in response:
LOGGER.debug(f"No assignments for {target_id} {permission_set_arn.split('/')[-1]}")
for assignment in response["AccountAssignments"]:
principal_type = assignment["PrincipalType"]
principal_id = assignment["PrincipalId"]
LOGGER.debug(f"Visiting principal {principal_type}:{principal_id}")
if context.principal:
for principal in context.principal:
                        type_matches = (principal[0] is None or principal[0] == principal_type)
if type_matches and principal[1] == principal_id:
LOGGER.debug(f"Found principal {principal_type}:{principal_id}")
break
else:
LOGGER.debug(f"Principal {principal_type}:{principal_id} does not match principals")
continue
principal_key = (principal_type, principal_id)
if not context.get_principal_names:
principal_name = None
else:
if principal_key not in context.cache:
if principal_type == "GROUP":
try:
response = identity_store_client.describe_group(
IdentityStoreId=context.ids.identity_store_id,
GroupId=principal_id
)
LOGGER.debug(f"DescribeGroup response: {response}")
context.cache[principal_key] = response["DisplayName"]
except aws_error_utils.catch_aws_error("ResourceNotFoundException"):
context.cache[principal_key] = None
elif principal_type == "USER":
try:
response = identity_store_client.describe_user(
IdentityStoreId=context.ids.identity_store_id,
UserId=principal_id
)
LOGGER.debug(f"DescribeUser response: {response}")
context.cache[principal_key] = response["UserName"]
except aws_error_utils.catch_aws_error("ResourceNotFoundException"):
context.cache[principal_key] = None
else:
raise ValueError(f"Unknown principal type {principal_type}")
principal_name = context.cache[principal_key]
                if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)):
                    LOGGER.debug(f"Principal is filtered: {principal_type}:{principal_id}")
                    continue
LOGGER.debug(f"Visiting principal: {principal_type}:{principal_id}")
yield principal_type, principal_id, principal_name
return principal_iterator
Assignment = collections.namedtuple("Assignment", [
"instance_arn",
"principal_type",
"principal_id",
"principal_name",
"permission_set_arn",
"permission_set_name",
"target_type",
"target_id",
"target_name",
])
def list_assignments(
session,
instance_arn=None,
identity_store_id=None,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
"""Iterate over AWS SSO assignments.
Args:
session (boto3.Session): boto3 session to use
instance_arn (str): The SSO instance to use, or it will be looked up using ListInstances
identity_store_id (str): The identity store to use if principal names are being retrieved
or it will be looked up using ListInstances
principal: A principal specification or list of principal specifications.
A principal specification is a principal id or a 2-tuple of principal type and id.
principal_filter: A callable taking principal type, principal id, and principal name
(which may be None), and returning True if the principal should be included.
permission_set: A permission set arn or id, or a list of the same.
permission_set_filter: A callable taking permission set arn and name (name may be None),
returning True if the permission set should be included.
target: A target specification or list of target specifications.
A target specification is an account or OU id, or a 2-tuple of target type, which
is either AWS_ACCOUNT or AWS_OU, and target id.
target_filter: A callable taking target type, target id, and target name
(which may be None), and returning True if the target should be included.
get_principal_names (bool): Retrieve names for principals in assignments.
get_permission_set_names (bool): Retrieve names for permission sets in assignments.
get_target_names (bool): Retrieve names for targets in assignments.
ou_recursive (bool): Set to True if an OU is provided as a target to get all accounts
including those in child OUs.
Returns:
An iterator over Assignment namedtuples
"""
ids = Ids(lambda: session, instance_arn, identity_store_id)
return _list_assignments(
session,
ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
)
def _list_assignments(
session,
ids,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
principal = _process_principal(principal)
permission_set = _process_permission_set(ids, permission_set)
target = _process_target(target)
cache = {}
filter_cache = {}
context = _Context(
session = session,
ids=ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
cache=cache,
filter_cache=filter_cache,
)
target_iterator = _get_target_iterator(context)
permission_set_iterator = _get_permission_set_iterator(context)
principal_iterator = _get_principal_iterator(context)
for target_type, target_id, target_name in target_iterator():
        for permission_set_arn, permission_set_id, permission_set_name in permission_set_iterator(target_type, target_id, target_name):
for principal_type, principal_id, principal_name in principal_iterator(
target_type, target_id, target_name,
permission_set_arn, permission_set_id, permission_set_name):
assignment = Assignment(
ids.instance_arn,
principal_type,
principal_id,
principal_name,
permission_set_arn,
permission_set_name,
target_type,
target_id,
target_name,
)
LOGGER.debug(f"Visiting assignment: {assignment}")
yield assignment
if __name__ == "__main__":
import boto3
import sys
import json
logging.basicConfig(level=logging.INFO)
kwargs = {}
for v in sys.argv[1:]:
if hasattr(logging, v):
LOGGER.setLevel(getattr(logging, v))
else:
kwargs = json.loads(v)
def fil(*args):
print(args)
return True
kwargs["target_filter"] = fil
try:
session = boto3.Session()
print(",".join(Assignment._fields))
for value in list_assignments(session, **kwargs):
print(",".join(v or "" for v in value))
except KeyboardInterrupt:
pass
|
4588
|
from pathlib import PosixPath
import configparser
from typing import Dict, Optional, Any, List
from inspect import cleandoc
import shutil
import tensorhive
import os
import logging
log = logging.getLogger(__name__)
class CONFIG_FILES:
# Where to copy files
# (TensorHive tries to load these by default)
config_dir = PosixPath.home() / '.config/TensorHive'
MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini')
HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini')
MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini')
# Where to get file templates from
# (Clone file when it's not found in config directory)
tensorhive_package_dir = PosixPath(__file__).parent
MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini')
HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini')
MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini')
ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini')
MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations')
class ConfigInitilizer:
'''Makes sure that all default config files exist'''
def __init__(self):
# 1. Check if all config files exist
all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \
PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \
PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists()
        if not all_exist:
            log.warning('[•] Detected missing default config file(s), recreating...')
            self.recreate_default_configuration_files()
        else:
            log.info('[•] All configs already exist, skipping...')
def recreate_default_configuration_files(self) -> None:
try:
            # 1. Create directory for storing config files
CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True)
# 2. Clone templates safely from `tensorhive` package
self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH)
self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH)
self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH)
# 3. Change config files permission
rw_owner_only = 0o600
os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only)
os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only)
os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only)
except Exception:
log.error('[✘] Unable to recreate configuration files.')
def safe_copy(self, src: str, dst: str) -> None:
        '''Safe means that it won't overwrite an existing configuration'''
if PosixPath(dst).exists():
log.info('Skipping, file already exists: {}'.format(dst))
else:
shutil.copy(src, dst)
log.info('Copied {} to {}'.format(src, dst))
class ConfigLoader:
@staticmethod
def load(path, displayed_title=''):
import configparser
config = configparser.ConfigParser(strict=False)
full_path = PosixPath(path).expanduser()
if config.read(str(full_path)):
log.info('[•] Reading {} config from {}'.format(displayed_title, full_path))
else:
log.warning('[✘] Configuration file not found ({})'.format(full_path))
log.info('Using default {} settings from config.py'.format(displayed_title))
return config
ConfigInitilizer()
config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main')
def display_config(cls):
'''
    Displays all uppercase class attributes (the class must be defined first)
Example usage: display_config(API_SERVER)
'''
print('[{class_name}]'.format(class_name=cls.__name__))
for key, value in cls.__dict__.items():
if key.isupper():
print('{} = {}'.format(key, value))
def check_env_var(name: str):
'''Makes sure that env variable is declared'''
if not os.getenv(name):
msg = cleandoc(
'''
{env} - undeclared environment variable!
Try this: `export {env}="..."`
''').format(env=name).split('\n')
log.warning(msg[0])
log.warning(msg[1])
class SSH:
section = 'ssh'
HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH)
TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True)
TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0)
NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1)
KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key')
def hosts_config_to_dict(path: str) -> Dict: # type: ignore
'''Parses sections containing hostnames'''
hosts_config = ConfigLoader.load(path, displayed_title='hosts')
result = {}
for section in hosts_config.sections():
# We want to parse only sections which describe target hosts
if section == 'proxy_tunneling':
continue
hostname = section
result[hostname] = {
'user': hosts_config.get(hostname, 'user'),
'port': hosts_config.getint(hostname, 'port', fallback=22)
}
return result
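    # Illustrative example (hypothetical hosts_config.ini contents, not part of the
    # original code):
    #
    #   [gpu-node-1.example.com]
    #   user = someuser
    #   port = 22
    #
    # hosts_config_to_dict() would parse this into:
    #   {'gpu-node-1.example.com': {'user': 'someuser', 'port': 22}}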
def proxy_config_to_dict(path: str) -> Optional[Dict]: # type: ignore
'''Parses [proxy_tunneling] section'''
config = ConfigLoader.load(path, displayed_title='proxy')
section = 'proxy_tunneling'
# Check if section is present and if yes, check if tunneling is enabled
if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False):
return {
'proxy_host': config.get(section, 'proxy_host'),
'proxy_user': config.get(section, 'proxy_user'),
'proxy_port': config.getint(section, 'proxy_port', fallback=22)
}
else:
return None
AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE)
PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE)
class DB:
section = 'database'
default_path = '~/.config/TensorHive/database.sqlite'
def uri_for_path(path: str) -> str: # type: ignore
return 'sqlite:///{}'.format(PosixPath(path).expanduser())
SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path))
TEST_DATABASE_URI = 'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite)
class API:
section = 'api'
TITLE = config.get(section, 'title', fallback='TensorHive API')
URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0')
URL_PREFIX = config.get(section, 'url_prefix', fallback='api')
SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml')
IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers')
import yaml
    responses_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml')
    with open(responses_file_path, 'r') as file:
RESPONSES = yaml.safe_load(file)
class APP_SERVER:
section = 'web_app.server'
BACKEND = config.get(section, 'backend', fallback='gunicorn')
HOST = config.get(section, 'host', fallback='0.0.0.0')
PORT = config.getint(section, 'port', fallback=5000)
WORKERS = config.getint(section, 'workers', fallback=4)
LOG_LEVEL = config.get(section, 'loglevel', fallback='warning')
class API_SERVER:
section = 'api.server'
BACKEND = config.get(section, 'backend', fallback='gevent')
HOST = config.get(section, 'host', fallback='0.0.0.0')
PORT = config.getint(section, 'port', fallback=1111)
DEBUG = config.getboolean(section, 'debug', fallback=False)
class MONITORING_SERVICE:
section = 'monitoring_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
class PROTECTION_SERVICE:
section = 'protection_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True)
NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False)
class MAILBOT:
mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot')
section = 'general'
INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0)
MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section,
'max_emails_per_protection_interval', fallback=50)
NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True)
NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False)
ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None)
section = 'smtp'
SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None)
SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None)
SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None)
SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587)
section = 'template/intruder'
INTRUDER_SUBJECT = mailbot_config.get(section, 'subject')
INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')
section = 'template/admin'
ADMIN_SUBJECT = mailbot_config.get(section, 'subject')
ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')
class USAGE_LOGGING_SERVICE:
section = 'usage_logging_service'
default_path = '~/.config/TensorHive/logs/'
def full_path(path: str) -> str: # type: ignore
return str(PosixPath(path).expanduser())
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path))
LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2)
class JOB_SCHEDULING_SERVICE:
section = 'job_scheduling_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0)
STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0)
SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, "schedule_queued_jobs_when_free_mins", fallback=30)
class AUTH:
from datetime import timedelta
section = 'auth'
def config_get_parsed(option: str, fallback: Any) -> List[str]: # type: ignore
'''
Parses value for option from string to a valid python list.
Fallback value is returned when anything goes wrong (e.g. option or value not present)
Example .ini file, function called with arguments: option='some_option', fallback=None
[some_section]
some_option = ['foo', 'bar']
Will return:
['foo', 'bar']
'''
import ast
try:
raw_arguments = config.get('auth', option)
parsed_arguments = ast.literal_eval(raw_arguments)
return parsed_arguments
except (configparser.Error, ValueError):
log.warning('Parsing [auth] config section failed for option "{}", using fallback value: {}'.format(
option, fallback))
return fallback
FLASK_JWT = {
'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'),
'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True),
'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']),
'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True),
'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes',
fallback=1)),
'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days',
fallback=1)),
'JWT_TOKEN_LOCATION': config_get_parsed('jwt_token_location', fallback=['headers'])
}
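# Illustrative note (added; not part of the original module): config_get_parsed and the
# FLASK_JWT lookups above expect an [auth] section roughly like the sketch below. The
# concrete values are placeholders, and the option names simply mirror the code above.
#
#   [auth]
#   secrect_key = jwt-some-secret
#   jwt_blacklist_enabled = true
#   jwt_blacklist_token_checks = ['access', 'refresh']
#   jwt_token_location = ['headers']
#   jwt_access_token_expires_minutes = 60
#   jwt_refresh_token_expires_days = 1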
|
4626
|
from django.template import loader, RequestContext
from django.http import Http404, HttpResponse
from django.core.xheaders import populate_xheaders
from django.core.paginator import ObjectPaginator, InvalidPage
from django.core.exceptions import ObjectDoesNotExist
def object_list(request, queryset, paginate_by=None, page=None,
allow_empty=False, template_name=None, template_loader=loader,
extra_context=None, context_processors=None, template_object_name='object',
mimetype=None):
"""
Generic list of objects.
Templates: ``<app_label>/<model_name>_list.html``
Context:
object_list
list of objects
is_paginated
are the results paginated?
results_per_page
number of objects per page (if paginated)
has_next
is there a next page?
has_previous
is there a prev page?
page
the current page
next
the next page
previous
the previous page
pages
number of pages, total
hits
number of objects, total
last_on_page
            the result number of the last object in the
object_list (1-indexed)
first_on_page
the result number of the first object in the
object_list (1-indexed)
"""
if extra_context is None: extra_context = {}
queryset = queryset._clone()
if paginate_by:
paginator = ObjectPaginator(queryset, paginate_by)
if not page:
page = request.GET.get('page', 1)
try:
page = int(page)
object_list = paginator.get_page(page - 1)
except (InvalidPage, ValueError):
if page == 1 and allow_empty:
object_list = []
else:
raise Http404
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'is_paginated': paginator.pages > 1,
'results_per_page': paginate_by,
'has_next': paginator.has_next_page(page - 1),
'has_previous': paginator.has_previous_page(page - 1),
'page': page,
'next': page + 1,
'previous': page - 1,
'last_on_page': paginator.last_on_page(page - 1),
'first_on_page': paginator.first_on_page(page - 1),
'pages': paginator.pages,
'hits' : paginator.hits,
}, context_processors)
else:
c = RequestContext(request, {
'%s_list' % template_object_name: queryset,
'is_paginated': False
}, context_processors)
if not allow_empty and len(queryset) == 0:
raise Http404
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
if not template_name:
model = queryset.model
template_name = "%s/%s_list.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
return HttpResponse(t.render(c), mimetype=mimetype)
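# Hedged usage sketch (added; not in the original module): wiring object_list into an
# old-style URLconf. `Article` and the URL pattern are hypothetical; with
# template_object_name='article' the context exposes `article_list` instead of
# `object_list`.
#
#   from django.conf.urls.defaults import patterns
#   from myapp.models import Article
#
#   urlpatterns = patterns('',
#       (r'^articles/$', object_list, {
#           'queryset': Article.objects.all(),
#           'paginate_by': 20,
#           'allow_empty': True,
#           'template_object_name': 'article',
#       }),
#   )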
def object_detail(request, queryset, object_id=None, slug=None,
slug_field=None, template_name=None, template_name_field=None,
template_loader=loader, extra_context=None,
context_processors=None, template_object_name='object',
mimetype=None):
"""
Generic detail of an object.
Templates: ``<app_label>/<model_name>_detail.html``
Context:
object
the object
"""
if extra_context is None: extra_context = {}
model = queryset.model
if object_id:
queryset = queryset.filter(pk=object_id)
elif slug and slug_field:
queryset = queryset.filter(**{slug_field: slug})
else:
        raise AttributeError("Generic detail view must be called with either an object_id or a slug/slug_field.")
try:
obj = queryset.get()
except ObjectDoesNotExist:
        raise Http404("No %s found matching the query" % (model._meta.verbose_name))
if not template_name:
template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
if template_name_field:
template_name_list = [getattr(obj, template_name_field), template_name]
t = template_loader.select_template(template_name_list)
else:
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
response = HttpResponse(t.render(c), mimetype=mimetype)
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
return response
|
4729
|
import unittest
from musket_core import coders
import numpy as np
import pandas as pd
import os
import math
fl=__file__
fl=os.path.dirname(fl)
class TestCoders(unittest.TestCase):
def test_binary_num(self):
a=np.array([0,1,0,1])
bc=coders.get_coder("binary",a, None)
self.assertEqual(bc[0], 0, "should be zero")
self.assertEqual(bc[1], 1, "should be one")
v=bc._decode(np.array([0.6]))
self.assertEqual(v, 1, "should be one")
v=bc._decode(np.array([0.2]))
self.assertEqual(v, 0, "should be zero")
pass
def test_binary_str(self):
a=np.array(["0","1","0","1"])
bc=coders.get_coder("binary",a, None)
self.assertEqual(bc[0], 0, "should be zero")
self.assertEqual(bc[1], 1, "should be one")
v=bc._decode(np.array([0.6]))
self.assertEqual(v, "1", "should be one")
v=bc._decode(np.array([0.2]))
self.assertEqual(v, "0", "should be zero")
pass
def test_binary_str2(self):
a=np.array(["","1","","1"])
bc=coders.get_coder("binary",a, None)
self.assertEqual(bc[0], 0, "should be zero")
self.assertEqual(bc[1], 1, "should be one")
v=bc._decode(np.array([0.6]))
self.assertEqual(v, "1", "should be one")
v=bc._decode(np.array([0.2]))
self.assertEqual(v, "", "should be zero")
pass
def test_binary_bool(self):
a=np.array([True,False,True,False])
bc=coders.get_coder("binary",a, None)
self.assertEqual(bc[0], 1, "should be zero")
self.assertEqual(bc[1], 0, "should be one")
v=bc._decode(np.array([0.6]))
self.assertEqual(v, True, "should be one")
v=bc._decode(np.array([0.2]))
self.assertEqual(v, False, "should be zero")
pass
def test_categorical_num(self):
a=np.array([0,1,2,1])
bc=coders.get_coder("categorical_one_hot",a, None)
self.assertEqual(bc[0][0], True, "should be zero")
self.assertEqual(bc[0][1], False, "should be one")
v=bc._decode(np.array([0.3,0.4,0.45]))
self.assertEqual(v, 2, "should be one")
v=bc._decode(np.array([0.2,0.1,0.1]))
self.assertEqual(v, 0, "should be zero")
pass
def test_categorical_str(self):
a=np.array(["a","b","c","b"])
bc=coders.get_coder("categorical_one_hot",a, None)
self.assertEqual(bc[0][0], True, "should be zero")
self.assertEqual(bc[0][1], False, "should be one")
v=bc._decode(np.array([0.3,0.4,0.45]))
self.assertEqual(v, "c", "should be one")
v=bc._decode(np.array([0.2,0.1,0.1]))
self.assertEqual(v, "a", "should be zero")
pass
def test_categorical_str2(self):
a=np.array(["","b","c","b"])
bc=coders.get_coder("categorical_one_hot",a, None)
self.assertEqual(bc[0][0], True, "should be zero")
self.assertEqual(bc[0][1], False, "should be one")
v=bc._decode(np.array([0.3,0.4,0.45]))
self.assertEqual(v, "c", "should be one")
v=bc._decode(np.array([0.2,0.1,0.1]))
self.assertEqual(v, "", "should be zero")
pass
def test_categorical_pd(self):
a=np.array([math.nan,1,2,1])
bc=coders.get_coder("categorical_one_hot",a, None)
self.assertEqual(bc[0][2], True, "should be zero")
self.assertEqual(bc[0][1], False, "should be one")
v=bc._decode(np.array([0.3,0.4,0.45]))
self.assertEqual(math.isnan(v),True, "should be one")
v=bc._decode(np.array([0.2,0.1,0.1]))
self.assertEqual(v, 1, "should be zero")
pass
def test_multiclass(self):
a=np.array(["1 2","0 2","0",""])
bc=coders.get_coder("multi_class",a, None)
val=bc[0]
self.assertEqual((val==np.array([False,True,True])).sum(), 3,"Fixing format")
for i in range(len(a)):
val=bc[i]
r=bc._decode(val)
self.assertEqual(r, a[i], "Decoding should work also")
pass
def test_multiclass1(self):
a=np.array(["1_2","0_2","0",""])
bc=coders.get_coder("multi_class",a, None)
val=bc[0]
self.assertEqual((val==np.array([False,True,True])).sum(), 3,"Fixing format")
for i in range(len(a)):
val=bc[i]
r=bc._decode(val)
self.assertEqual(r, a[i], "Decoding should work also")
pass
def test_multiclass2(self):
a=np.array(["1","","",""])
bc=coders.get_coder("multi_class",a, None)
val=bc[0]
self.assertEqual((val==np.array([True])).sum(), 1,"Fixing format")
for i in range(len(a)):
val=bc[i]
r=bc._decode(val)
self.assertEqual(r, a[i], "Decoding should work also")
pass
|
4732
|
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.intsights.parser import IntSightsParser
class TestIntSightsParser(DojoTestCase):
def test_intsights_parser_with_one_critical_vuln_has_one_findings_json(
self):
testfile = open("unittests/scans/intsights/intsights_one_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
finding = list(findings)[0]
self.assertEqual(
'5c80dbf83b4a3900078b6be6',
finding.unique_id_from_tool)
self.assertEqual(
'HTTP headers weakness in initech.com web server',
finding.title)
        self.assertEqual('Critical', finding.severity)
        self.assertEqual(
            "https://dashboard.intsights.com/#/threat-command/alerts?search=5c80dbf83b4a3900078b6be6",
            finding.references)
def test_intsights_parser_with_one_critical_vuln_has_one_findings_csv(
self):
testfile = open("unittests/scans/intsights/intsights_one_vuln.csv")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
finding = list(findings)[0]
self.assertEqual(
"mn7xy83finmmth4ja363rci9",
finding.unique_id_from_tool)
self.assertEqual(
"HTTP headers weakness in company-domain.com web server",
finding.title)
def test_intsights_parser_with_many_vuln_has_many_findings_json(self):
testfile = open("unittests/scans/intsights/intsights_many_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(3, len(findings))
def test_intsights_parser_with_many_vuln_has_many_findings_csv(self):
testfile = open("unittests/scans/intsights/intsights_many_vuln.csv")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(9, len(findings))
def test_intsights_parser_invalid_text_with_error_csv(self):
with self.assertRaises(ValueError):
testfile = open(
"unittests/scans/intsights/intsights_invalid_file.txt")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
|
4740
|
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# NOT -> ParameterModule
# NOT -> children_and_parameters
# NOT -> flatten_model
# NOT -> lr_range
# NOT -> scheduling functions
# NOT -> SmoothenValue
# YES -> lr_find
# NOT -> plot_lr_find
# NOT TO BE MODIFIED
class ParameterModule(nn.Module):
"Register a lone parameter 'p' in a module"
def __init__(self, p:nn.Parameter):
super().__init__()
self.val = p
def forward(self, x):
return x
# NOT TO BE MODIFIED
# To be used to flatten_model
def children_and_parameters(m:nn.Module):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children
# NOT TO BE MODIFIED
flatten_model = lambda m: sum(map(flatten_model,children_and_parameters(m)),[]) if len(list(m.children())) else [m]
# NOT TO BE MODIFIED
def lr_range(model, lr):
"""
    Build differential learning rates from lr.
    Arguments:
        model :- torch.nn.Module
        lr :- float or slice
    Returns:
        lr unchanged if it is a float; otherwise an array of per-layer learning
        rates spread between lr.start and lr.stop
"""
if not isinstance(lr, slice):
return lr
num_layer = len([nn.Sequential(*flatten_model(model))])
if lr.start:
mult = lr.stop / lr.start
step = mult**(1/(num_layer-1))
res = np.array([lr.start*(step**i) for i in range(num_layer)])
else:
res = [lr.stop/10.]*(num_layer-1) + [lr.stop]
return np.array(res)
# NOT TO BE MODIFIED
# These are the functions that give us the values of lr. For example, for a linearly
# increasing lr we would use annealing_linear.
# You can add your own custom function for producing lr.
# By default annealing_exp is used for both lr and momentum.
def annealing_no(start, end, pct:float):
"No annealing, always return `start`."
return start
def annealing_linear(start, end, pct:float):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
def annealing_exp(start, end, pct:float):
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
def annealing_cos(start, end, pct:float):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
def do_annealing_poly(start, end, pct:float, degree):
return end + (start-end) * (1-pct)**degree
# NOT TO BE MODIFIED
class Stepper():
"""
Used to step from start, end ('vals') over 'n_iter' iterations on a schedule.
    We create a stepper object and then use one of the above annealing functions
    to step from the start lr to the end lr.
"""
def __init__(self, vals, n_iter:int, func=None):
self.start, self.end = (vals[0], vals[1]) if isinstance(vals, tuple) else (vals,0)
self.n_iter = max(1, n_iter)
if func is None:
self.func = annealing_linear if isinstance(vals, tuple) else annealing_no
else:
self.func = func
self.n = 0
def step(self):
"Return next value along annealed schedule"
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
@property
def is_done(self)->bool:
"Return 'True' if schedule completed"
return self.n >= self.n_iter
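# Minimal illustration (added; not part of the original file): stepping from 1e-7 to 10
# over 5 iterations with annealing_exp. This is only a sketch of how Stepper is driven
# inside lr_find below.
#
#   sched = Stepper((1e-7, 10.), n_iter=5, func=annealing_exp)
#   while not sched.is_done:
#       lr = sched.step()   # 1e-7 * (10 / 1e-7) ** (n / 5) for n = 1..5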
# NOT TO BE MODIFIED
class SmoothenValue():
"Create a smooth moving average for a value (loss, etc) using `beta`."
def __init__(self, beta:float):
self.beta,self.n,self.mov_avg = beta,0,0
def add_value(self, val:float)->None:
"Add `val` to calculate updated smoothed value."
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
# TO BE MODIFIED IN SOME CASES
def lr_find(data_loader, model, loss_fn, opt, wd:int=0, start_lr:float=1e-7, end_lr:float=10,
num_it:int=100, stop_div:bool=True, smooth_beta:float=0.98, use_gpu:bool=True,
device=torch.device('cuda'), anneal_func=annealing_exp):
"""
    The main function that you call to collect data for the learning_rate vs. loss graph;
    it is the only function from lr_find.py that you need to call directly. By default it
    runs on the GPU and assumes your model is already on the GPU when use_gpu is True.
Arguments:-
data_loader :- torch.utils.data.DataLoader
model :- torch.nn.Module
loss_fn :- torch.nn.LossFunction
opt :- torch.optim.Optimizer
wd :- weight decay (default=0).
start_lr :- The learning rate from where to start in lr_find (default=1e-7)
end_lr :- The learning rate at which to end lr_find (default=10)
num_it :- Number of iterations for lr_find (default=100)
stop_div :- If the loss diverges, then stop early (default=True)
        smooth_beta :- The beta value to smooth the running average of the loss (default=0.98)
use_gpu :- True (train on GPU) else CPU
anneal_func :- The step function you want to use (default exp)
device :- Torch device to use for training model (default GPU)
Returns:
losses :- list of smoothened version of losses
lrs :- list of all lrs that we test
"""
model.train()
stop = False
flag = False
best_loss = 0.
iteration = 0
losses = []
lrs = []
lrs.append(start_lr)
start_lr = lr_range(model, start_lr)
start_lr = np.array(start_lr) if isinstance(start_lr, (tuple, list)) else start_lr
end_lr = lr_range(model, end_lr)
end_lr = np.array(end_lr) if isinstance(end_lr, (tuple, list)) else end_lr
sched = Stepper((start_lr, end_lr), num_it, anneal_func)
smoothener = SmoothenValue(smooth_beta)
epochs = int(np.ceil(num_it/len(data_loader)))
# save model_dict
model_state = model.state_dict()
opt_state = opt.state_dict()
# Set optimizer learning_rate = start_lr
for group in opt.param_groups:
group['lr'] = sched.start
for i in range(epochs):
for data in data_loader:
opt.zero_grad()
################### TO BE MODIFIED ###################
# Depending on your model, you will have to modify your
# data pipeline and how you give inputs to your model.
inputs, labels = data
if use_gpu:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = loss_fn(outputs, labels)
#####################################################
if use_gpu:
smoothener.add_value(loss.detach().cpu())
else:
smoothener.add_value(loss.detach())
smooth_loss = smoothener.smooth
losses.append(smooth_loss)
loss.backward()
################### TO BE MODIFIED ###################
# For AdamW. If you want to use Adam, comment these lines
for group in opt.param_groups:
for param in group['params']:
param.data = param.data.add(-wd * group['lr'], param.data)
#####################################################
opt.step()
# Change lr
new_lr = sched.step()
lrs.append(new_lr)
for group in opt.param_groups:
group['lr'] = new_lr
################### TO BE MODIFIED ###################
            # You don't necessarily need to change this. But if you
            # are maximizing the loss instead of minimizing it, you
            # will have to change it.
if iteration == 0 or smooth_loss < best_loss:
best_loss = smooth_loss
iteration += 1
if sched.is_done or (stop_div and (smooth_loss > 4*best_loss or torch.isnan(loss))):
flag = True
break
#####################################################
if iteration%10 == 0:
print(f'Iteration: {iteration}')
if flag:
break
# Load state dict
model.load_state_dict(model_state)
opt.load_state_dict(opt_state)
lrs.pop()
    print('LR Finder is complete.')
return losses, lrs
# NOT TO BE MODIFIED
def plot_lr_find(losses, lrs, skip_start:int=10, skip_end:int=5, suggestion:bool=False, return_fig:bool=None):
"""
It will take the losses and lrs returned by lr_find as input.
Arguments:-
skip_start -> It will skip skip_start lrs from the start
skip_end -> It will skip skip_end lrs from the end
suggestion -> If you want to see the point where the gradient changes most
return_fig -> True then get the fig in the return statement
"""
lrs = lrs[skip_start:-skip_end] if skip_end > 0 else lrs[skip_start:]
losses = losses[skip_start:-skip_end] if skip_end > 0 else losses[skip_start:]
losses = [x.item() for x in losses]
fig, ax = plt.subplots(1, 1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
if suggestion:
try:
mg = (np.gradient(np.array(losses))).argmin()
        except Exception:
print("Failed to compute the gradients, there might not be enough points.")
return
print(f"Min numerical gradient: {lrs[mg]:.2E}")
ax.plot(lrs[mg], losses[mg], markersize=10, marker='o', color='red')
if return_fig is not None:
return fig
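# Hedged usage sketch (added; not part of the original file): `train_loader` and
# `MyModel` are hypothetical, and the TO BE MODIFIED sections above must already match
# your batch format for this to run.
#
#   device = torch.device('cuda')
#   model = MyModel().to(device)
#   opt = torch.optim.SGD(model.parameters(), lr=1e-7, momentum=0.9)
#   loss_fn = nn.CrossEntropyLoss()
#   losses, lrs = lr_find(train_loader, model, loss_fn, opt,
#                         start_lr=1e-7, end_lr=10, num_it=100, device=device)
#   plot_lr_find(losses, lrs, suggestion=True)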
|
4754
|
import unittest
from unittest import TestCase
from misc import verify
class TestVerify(TestCase):
"""Tests misc.py verifies function."""
def test_verify__with_zero_threshold_and_expected_succeeds(self):
"""Test passes when expected rate, actual rate and threshold are all zero."""
result = verify(metric="Query failure rate", actual=0.0, expected=0.0, threshold=0.0)
self.assertEqual(result, 0)
def test_verify__fails_when_positive_delta_is_larger_than_postive_threshold(self):
"""Test fails when positive delta between actual rate and expected rate exceeds positive threshold."""
result = verify(metric="Update latency", actual=200, expected=100, threshold=0.1)
self.assertEqual(result, 1)
def test_verify__fails_when_negative_delta_is_smaller_than_negative_threshold(self):
"""Test fails when negative delta between actual rate and expected rate exceeds negative threshold."""
result = verify(metric="Update latency", actual=50, expected=100, threshold=-0.01)
self.assertEqual(result, 1)
    def test_verify__passes_when_negative_delta_and_positive_threshold(self):
        """Test passes when the delta is negative (an improvement) and the threshold is positive."""
result = verify(metric="Update latency", actual=50, expected=100, threshold=0.01)
self.assertEqual(result, 0)
if __name__ == "__main__":
unittest.main()
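# Note (added; not from the original module): misc.verify itself is not shown here. A
# sketch that is consistent with the assertions above, and only an assumption about the
# real implementation, would be:
#
#   def verify(metric, actual, expected, threshold):
#       delta = 0.0 if expected == 0 else (actual - expected) / expected
#       failed = delta > threshold if threshold >= 0 else delta < threshold
#       return 1 if failed else 0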
|
4763
|
import re
from rest_framework import serializers
from .models import Collection, CollectionIcon
class CollectionSerializer(serializers.ModelSerializer):
"""Collections's serializer"""
class Meta:
model = Collection
read_only = ('token', )
class CollectionIconSerializer(serializers.ModelSerializer):
"""CollectionIcon's Serializer. """
class Meta:
model = CollectionIcon
def validate_width(self, attrs, source):
width = attrs[source]
if width < 1.0:
raise serializers.ValidationError('Width should be greater than 1.0')
return attrs
def validate_name(self, attrs, source):
name = attrs[source].lower()
name = re.sub(r'[^a-z0-9\-]', '-', name).strip('-')
name = re.sub(r'-+', '-', name)
if name:
attrs[source] = name
else:
raise serializers.ValidationError('Invalid name')
return attrs
def validate(self, attrs):
packicon = attrs.get('packicon')
svg_d = attrs.get('svg_d')
width = attrs.get('width')
if packicon or (svg_d and width): return attrs
raise serializers.ValidationError(
'Either a packicon or the shape of icon should be given'
)
|
4789
|
from braintree.configuration import Configuration
from braintree.resource import Resource
class AccountUpdaterDailyReport(Resource):
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if "report_url" in attributes:
self.report_url = attributes.pop("report_url")
if "report_date" in attributes:
self.report_date = attributes.pop("report_date")
def __repr__(self):
detail_list = ["report_url", "report_date"]
return super(AccountUpdaterDailyReport, self).__repr__(detail_list)
|
4817
|
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numpy import array
from numpy import max
map = Basemap(llcrnrlon=-0.5,llcrnrlat=39.8,urcrnrlon=4.,urcrnrlat=43.,
resolution='i', projection='tmerc', lat_0 = 39.5, lon_0 = 1)
map.readshapefile('../sample_files/lightnings', 'lightnings')
x = []
y = []
c = []
for info, lightning in zip(map.lightnings_info, map.lightnings):
x.append(lightning[0])
y.append(lightning[1])
if float(info['amplitude']) < 0:
c.append(-1 * float(info['amplitude']))
else:
c.append(float(info['amplitude']))
plt.figure(0)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y))
map.colorbar(location='bottom')
plt.figure(1)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', bins='log')
map.colorbar(location='bottom', format='%.1f', label='log(# lightnings)')
plt.figure(2)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', norm=colors.LogNorm())
cb = map.colorbar(location='bottom', format='%d', label='# lightnings')
cb.set_ticks([1, 5, 10, 15, 20, 25, 30])
cb.set_ticklabels([1, 5, 10, 15, 20, 25, 30])
plt.figure(3)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), C = array(c), reduce_C_function = max, gridsize=20, mincnt=1, cmap='YlOrBr', linewidths=0.5, edgecolors='k')
map.colorbar(location='bottom', label='Mean amplitude (kA)')
plt.show()
|
4819
|
import html
from collections import namedtuple
from pathlib import Path
from typing import List, Dict
import requests
from bs4 import BeautifulSoup
from lxml import etree
from lxml.etree import XPath
Emoji = namedtuple('Emoji', 'char name')
class EmojiExtractor(object):
def __init__(self):
self.all_emojis = self.fetch_emoji_list()
self.annotations = self.fetch_annotations()
self.base_emojis = self.fetch_base_emojis()
def fetch_emoji_list(self: 'EmojiExtractor') -> List[Emoji]:
print('Downloading list of all emojis')
data = requests.get(
'https://unicode.org/emoji/charts-14.0/full-emoji-list.html',
timeout=120
) # type: requests.Response
html = BeautifulSoup(data.text, 'lxml')
emojis = []
for row in html.find('table').find_all('tr'):
if not row.th:
emoji = row.find('td', {'class': 'chars'}).string
description = row.find('td', {'class': 'name'}).string.replace('⊛ ', '')
emojis.append(Emoji(emoji, description))
return emojis
def fetch_annotations(self: 'EmojiExtractor') -> Dict[chr, List[str]]:
print('Downloading annotations')
data = requests.get(
'https://raw.githubusercontent.com/unicode-org/cldr/latest/common/annotations/en.xml',
timeout=60
) # type: requests.Response
xpath = XPath('./annotations/annotation[not(@type="tts")]')
return {element.get('cp'): element.text.split(' | ')
for element in xpath(etree.fromstring(data.content))}
def fetch_base_emojis(self: 'EmojiExtractor') -> List[chr]:
print('Downloading list of human emojis...')
data = requests.get(
'https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt',
timeout=60
) # type: requests.Response
started = False
emojis = []
for line in data.text.split('\n'):
if not started and line != '# All omitted code points have Emoji_Modifier_Base=No ':
continue
started = True
if line == '# Total elements: 132':
break
if line and not line.startswith('#'):
emojis.extend(self.resolve_character_range(line.split(';')[0].strip()))
return emojis
def resolve_character_range(self, line: str) -> List[str]:
try:
(start, end) = line.split('..')
return [chr(char) for char in range(int(start, 16), int(end, 16) + 1)]
except ValueError:
return [self.resolve_character(line)]
def resolve_character(self, string: str) -> str:
return "".join(chr(int(character, 16)) for character in string.split(' '))
def write_symbol_file(self: 'EmojiExtractor'):
print('Writing collected emojis to symbol file')
with Path('../picker/data/emojis.csv').open('w') as symbol_file:
for entry in self.compile_entries(self.all_emojis):
symbol_file.write(entry + "\n")
def compile_entries(self: 'EmojiExtractor', emojis: List[Emoji]) -> List[str]:
annotated_emojis = []
for emoji in emojis:
entry = f"{emoji.char} {html.escape(emoji.name)}"
if emoji.char in self.annotations:
entry += f" <small>({html.escape(', '.join([annotation for annotation in self.annotations[emoji.char] if annotation != emoji.name]))})</small>"
annotated_emojis.append(entry)
return annotated_emojis
def write_metadata_file(self: 'EmojiExtractor'):
print('Writing metadata to metadata file')
with Path('../picker/copyme.py').open('w') as metadata_file:
metadata_file.write('skin_tone_selectable_emojis={\'')
metadata_file.write('\', \''.join(self.base_emojis))
metadata_file.write('\'}\n')
def extract(self: 'EmojiExtractor'):
self.write_symbol_file()
self.write_metadata_file()
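# Hedged usage sketch (added; not part of the original file): the extractor is normally
# driven by a single call, which regenerates both output files at the hardcoded paths
# above.
#
#   if __name__ == '__main__':
#       EmojiExtractor().extract()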
|
4851
|
import glob
import bs4
import gzip
import pickle
import re
import os
from concurrent.futures import ProcessPoolExecutor as PPE
import json
from pathlib import Path
from hashlib import sha256
import shutil
Path('json').mkdir(exist_ok=True)
def sanitize(text):
text = re.sub(r'(\t|\n|\r)', '', text)
text = re.sub(r'\xa0', '', text)
text = re.sub(r'\\r', '', text)
text = re.sub('地図で物件の周辺環境をチェック!', '', text)
return text
def is_train(x):
    # Returns False when the text mentions a train line ('線'), True otherwise,
    # so it can be used to filter train-line cells out of the table.
    if '線' in x:
        return False
    else:
        return True
def pmap(arg):
key, fns = arg
SIZE = len(fns)
for index, fn in enumerate(fns):
try:
print('now', key,index, 'size', SIZE, fn)
html = gzip.decompress(open(fn, 'rb').read())
soup = bs4.BeautifulSoup(html, 'lxml')
if soup.find('link', {'rel':'canonical'}) is None:
Path(fn).unlink()
continue
canonical = soup.find('link', {'rel':'canonical'})['href']
if '/detail/' not in canonical:
Path(fn).unlink()
continue
basic_table = soup.find('div', {'class':'detail_basicInfo'})
if basic_table is None:
Path(fn).unlink()
continue
basic_table = basic_table.find('table')
            # handle misalignment between th and td columns
tds = list(basic_table.find_all('td'))
tds.pop(0)
#print(tds.pop(0).text)
            tds = [td for td in tds if is_train(td.text)]
print(len(basic_table.find_all('th')), len(tds))
if len(basic_table.find_all('th')) == 13 and len(tds) == 14:
tds.pop(4)
...
basic_obj = {sanitize(th.text):sanitize(td.text) for th, td in zip(basic_table.find_all('th'),tds)}
detail_obj = {}
for table in soup.find('div', {'class':'detail_specTable'}).find_all('table'):
#print(table)
for th, td in zip(table.find_all('th'), table.find_all('td')):
detail_obj[sanitize(th.text)] = sanitize(td.text)
obj = {'basic':basic_obj, 'detail':detail_obj, 'canonical':canonical, 'title':soup.title.text}
last_fn = fn.split('/')[-1]
shutil.move(fn, f'parsed_htmls/{last_fn}' )
with open(f'json/{last_fn}', 'w') as fp:
fp.write(json.dumps(obj, indent=2, ensure_ascii=False))
except Exception as ex:
#Path(fn).unlink()
print(ex)
#detail_table = soup.find('table', {'class':'bukken_detail_table'})
#detail_obj = {re.sub(r'\t', '', th.text):re.sub(r'(\t|\n)', '', td.text) for th, td in zip(detail_table.find_all('th'), detail_table.find_all('td'))}
#print(detail_obj)
#urls = [sha256(bytes(v, 'utf8')).hexdigest() for v in json.load(fp=open('./hash_url.json')).values()]
#fns = [f'./htmls/{url}' for url in urls]
import random
files = glob.glob('./htmls/*')
random.shuffle(files)
args = {}
for index, fn in enumerate(files):
key = index%8
if args.get(key) is None:
args[key] = []
args[key].append(fn)
args = [(key,fns) for key,fns in args.items()]
#[pmap(arg) for arg in args]
with PPE(max_workers=8) as exe:
exe.map(pmap, args)
|
4891
|
md_template_d144 = """verbosity=0
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=160
ny=80
nz=80
[Domain]
ox=0.
oy=0.
oz=0.
lx=42.4813
ly=21.2406
lz=21.2406
[Potentials]
pseudopotential=pseudo.D_tm_pbe
[Poisson]
solver=@
max_steps_initial=@50
max_steps=@50
reset=@
bcx=periodic
bcy=periodic
bcz=periodic
[Run]
type=MD
[MD]
type=@
num_steps=@
dt=@15.
[XLBOMD]
dissipation=@5
align=@
[Quench]
max_steps=@5
max_steps_tight=@
atol=1.e-@10
num_lin_iterations=3
ortho_freq=100
[SpreadPenalty]
type=@energy
damping=@
[email protected]
[email protected]
[Orbitals]
initial_type=Gaussian
initial_width=1.5
overallocate_factor=@2.
[ProjectedMatrices]
solver=@short_sighted
[LocalizationRegions]
radius=@8.
auxiliary_radius=@
[email protected]
[Restart]
input_filename=wave.out
input_level=3
interval=@
"""
md_template_H2O_64 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=128
ny=128
nz=128
[Domain]
ox=0.
oy=0.
oz=0.
lx=23.4884
ly=23.4884
lz=23.4884
[Potentials]
pseudopotential=pseudo.O_ONCV_PBE_SG15
pseudopotential=pseudo.D_ONCV_PBE_SG15
[Poisson]
solver=@
max_steps=@
[Run]
type=MD
[Quench]
max_steps=1000
atol=1.e-@
[MD]
type=@
num_steps=@
dt=10.
print_interval=5
[XLBOMD]
dissipation=@
align=@
[Restart]
input_filename=wave.out
input_level=4
output_level=4
interval=@
"""
quench_template_H2O_64 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=128
ny=128
nz=128
[Domain]
ox=0.
oy=0.
oz=0.
lx=23.4884
ly=23.4884
lz=23.4884
[Potentials]
pseudopotential=pseudo.O_ONCV_PBE_SG15
pseudopotential=pseudo.D_ONCV_PBE_SG15
[Run]
type=QUENCH
[Quench]
max_steps=1000
atol=1.e-8
[Orbitals]
initial_type=Fourier
[Restart]
output_level=4
"""
quench_template_d144 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=160
ny=80
nz=80
[Domain]
ox=0.
oy=0.
oz=0.
lx=42.4813
ly=21.2406
lz=21.2406
[Potentials]
pseudopotential=pseudo.D_tm_pbe
[Poisson]
solver=@
max_steps_initial=@50
max_steps=@50
bcx=periodic
bcy=periodic
bcz=periodic
[Run]
type=QUENCH
[Quench]
max_steps=200
atol=1.e-7
num_lin_iterations=3
ortho_freq=100
[SpreadPenalty]
type=@energy
damping=@
[email protected]
[email protected]
[Orbitals]
initial_type=Gaussian
initial_width=1.5
[ProjectedMatrices]
solver=@short_sighted
[LocalizationRegions]
radius=@8.
[Restart]
output_type=distributed
"""
H2O_64_params={
'nodes': '32',
'ntasks': '256',
'omp_num_threads': 8 if omp_num_threads == 4 else omp_num_threads,
'cores_per_task': '2',
'potentials': 'ln -s $maindir/potentials/pseudo.O_ONCV_PBE_SG15\nln -s $maindir/potentials/pseudo.D_ONCV_PBE_SG15',
'lrs': '',
'jobname': 'H2O_64',
}
d144_params={
'nodes': '8',
'walltime': '01:30:00',
'ntasks': '125',
'omp_num_threads': omp_num_threads,
'cores_per_task': '1',
'potentials': 'ln -s $maindir/potentials/pseudo.D_tm_pbe',
'lrs': '-l lrs.in',
'jobname': 'd144',
}
vulcan_params={
'queue': 'psmall',
'scratch_path': '/p/lscratchv/mgmolu/dunn27/mgmol/',
'gres': 'lscratchv',
'exe': 'mgmol-bgq',
}
cab_params={
'queue': 'pbatch',
'scratch_path': '/p/lscratchd/dunn27/mgmol/',
'gres': 'lscratchd',
'omp_num_threads': '1',
'exe': 'mgmol-pel',
'walltime': '01:30:00',
}
runfile_quench_template="""#!/bin/tcsh
#MSUB -l nodes={nodes},walltime={walltime}
#MSUB -o mgmol.out
#MSUB -q {queue}
#MSUB -A comp
#MSUB -l gres={gres}
#MSUB -N {jobname}
rm -f queued
echo ' ' > running
use boost-nompi-1.55.0
export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0
export Boost_NO_SYSTEM_PATHS=ON
setenv OMP_NUM_THREADS {omp_num_threads}
set ntasks = {ntasks}
set maindir = $home/mgmol
set exe = $maindir/bin/{exe}
set datadir = `pwd`
set scratchdir = {scratch_path}`basename $datadir`
mkdir $scratchdir
cd $scratchdir
echo ' ' > running
set cfg_quench = mgmol_quench.cfg
cp $datadir/$cfg_quench .
cp $datadir/coords.in .
cp $datadir/lrs.in .
{potentials}
#1st run
srun -n $ntasks -c {cores_per_task} $exe -c $cfg_quench -i coords.in {lrs}
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
rm -f running
echo ' ' > queued
"""
runfile_md_template="""#!/bin/tcsh
#MSUB -l nodes={nodes},walltime={walltime}
#MSUB -o mgmol.out
#MSUB -q {queue}
#MSUB -A comp
#MSUB -l gres={gres}
#MSUB -N {jobname}
rm -f queued
echo ' ' > running
use boost-nompi-1.55.0
export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0
export Boost_NO_SYSTEM_PATHS=ON
setenv OMP_NUM_THREADS {omp_num_threads}
set ntasks = {ntasks}
set maindir = $home/mgmol
set exe = $maindir/bin/{exe}
set datadir = `pwd`
set scratchdir = {scratch_path}`basename $datadir`
mkdir $scratchdir
cd $scratchdir
echo ' ' > running
set cfg_md = mgmol_md.cfg
cp $datadir/$cfg_md .
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
#MD run
srun -n $ntasks -c {cores_per_task} $exe -c $cfg_md
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
rm -f running
echo ' ' > queued
"""
|
4902
|
import torch
import argparse
import os
import sys
import cv2
import time
class Configuration():
def __init__(self):
self.EXP_NAME = 'mobilenetv2_cfbi'
self.DIR_ROOT = './'
self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets')
self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS')
self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train')
self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid')
self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME)
self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt')
self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log')
self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img')
self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard')
self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval')
self.DATASETS = ['youtubevos']
self.DATA_WORKERS = 4
self.DATA_RANDOMCROP = (465, 465)
self.DATA_RANDOMFLIP = 0.5
self.DATA_MAX_CROP_STEPS = 5
self.DATA_MIN_SCALE_FACTOR = 1.
self.DATA_MAX_SCALE_FACTOR = 1.3
self.DATA_SHORT_EDGE_LEN = 480
self.DATA_RANDOM_REVERSE_SEQ = True
self.DATA_DAVIS_REPEAT = 30
self.DATA_CURR_SEQ_LEN = 3
self.DATA_RANDOM_GAP_DAVIS = 3
self.DATA_RANDOM_GAP_YTB = 3
self.PRETRAIN = True
self.PRETRAIN_FULL = False
self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar'
self.MODEL_BACKBONE = 'mobilenet'
self.MODEL_MODULE = 'networks.cfbi.cfbi'
self.MODEL_OUTPUT_STRIDE = 16
self.MODEL_ASPP_OUTDIM = 256
self.MODEL_SHORTCUT_DIM = 48
self.MODEL_SEMANTIC_EMBEDDING_DIM = 100
self.MODEL_HEAD_EMBEDDING_DIM = 256
self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64
self.MODEL_GN_GROUPS = 32
self.MODEL_GN_EMB_GROUPS = 25
self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12]
self.MODEL_LOCAL_DOWNSAMPLE = True
self.MODEL_REFINE_CHANNELS = 64 # n * 32
self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else 24
self.MODEL_RELATED_CHANNELS = 64
self.MODEL_EPSILON = 1e-5
self.MODEL_MATCHING_BACKGROUND = True
self.MODEL_GCT_BETA_WD = True
self.MODEL_FLOAT16_MATCHING = True
self.MODEL_FREEZE_BN = True
self.MODEL_FREEZE_BACKBONE = False
self.TRAIN_TOTAL_STEPS = 100000
self.TRAIN_START_STEP = 0
self.TRAIN_LR = 0.01
self.TRAIN_MOMENTUM = 0.9
self.TRAIN_COSINE_DECAY = False
self.TRAIN_WARM_UP_STEPS = 1000
self.TRAIN_WEIGHT_DECAY = 15e-5
self.TRAIN_POWER = 0.9
self.TRAIN_GPUS = 4
self.TRAIN_BATCH_SIZE = 8
self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_TBLOG = False
self.TRAIN_TBLOG_STEP = 60
self.TRAIN_LOG_STEP = 20
self.TRAIN_IMG_LOG = False
self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15
self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_CLIP_GRAD_NORM = 5.
self.TRAIN_SAVE_STEP = 1000
self.TRAIN_MAX_KEEP_CKPT = 8
self.TRAIN_RESUME = False
self.TRAIN_RESUME_CKPT = None
self.TRAIN_RESUME_STEP = 0
self.TRAIN_AUTO_RESUME = True
self.TRAIN_GLOBAL_ATROUS_RATE = 1
self.TRAIN_LOCAL_ATROUS_RATE = 1
self.TRAIN_GLOBAL_CHUNKS = 20
self.TRAIN_DATASET_FULL_RESOLUTION = True
self.TEST_GPU_ID = 0
self.TEST_DATASET = 'youtubevos'
self.TEST_DATASET_FULL_RESOLUTION = False
self.TEST_DATASET_SPLIT = ['val']
self.TEST_CKPT_PATH = None
self.TEST_CKPT_STEP = None # if "None", evaluate the latest checkpoint.
self.TEST_FLIP = False
self.TEST_MULTISCALE = [1]
self.TEST_MIN_SIZE = None
self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE == [1] else 800
self.TEST_WORKERS = 4
self.TEST_GLOBAL_CHUNKS = 4
self.TEST_GLOBAL_ATROUS_RATE = 2
self.TEST_LOCAL_ATROUS_RATE = 1
# dist
self.DIST_ENABLE = True
self.DIST_BACKEND = "gloo"
self.DIST_URL = "file://./sharefile"
self.DIST_START_GPU = 0
self.__check()
def __check(self):
if not torch.cuda.is_available():
            raise ValueError('config.py: cuda is not available')
if self.TRAIN_GPUS == 0:
raise ValueError('config.py: the number of GPU is 0')
for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]:
if not os.path.isdir(path):
os.makedirs(path)
cfg = Configuration()
|
4956
|
import random
import math
from functools import partial
import json
import pysndfx
import librosa
import numpy as np
import torch
from ops.audio import (
read_audio, compute_stft, trim_audio, mix_audio_and_labels,
shuffle_audio, cutout
)
SAMPLE_RATE = 44100
class Augmentation:
"""A base class for data augmentation transforms"""
pass
class MapLabels:
def __init__(self, class_map, drop_raw=True):
self.class_map = class_map
def __call__(self, dataset, **inputs):
labels = np.zeros(len(self.class_map), dtype=np.float32)
for c in inputs["raw_labels"]:
labels[self.class_map[c]] = 1.0
transformed = dict(inputs)
transformed["labels"] = labels
transformed.pop("raw_labels")
return transformed
class MixUp(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
first_audio, first_labels = inputs["audio"], inputs["labels"]
random_sample = dataset.random_clean_sample()
new_audio, new_labels = mix_audio_and_labels(
first_audio, random_sample["audio"],
first_labels, random_sample["labels"]
)
transformed["audio"] = new_audio
transformed["labels"] = new_labels
return transformed
class FlipAudio(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = np.flipud(inputs["audio"])
return transformed
class AudioAugmentation(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
effects_chain = (
pysndfx.AudioEffectsChain()
.reverb(
reverberance=random.randrange(50),
room_scale=random.randrange(50),
stereo_depth=random.randrange(50)
)
.pitch(shift=random.randrange(-300, 300))
.overdrive(gain=random.randrange(2, 10))
.speed(random.uniform(0.9, 1.1))
)
transformed["audio"] = effects_chain(inputs["audio"])
return transformed
class LoadAudio:
def __init__(self):
pass
def __call__(self, dataset, **inputs):
audio, sr = read_audio(inputs["filename"])
transformed = dict(inputs)
transformed["audio"] = audio
transformed["sr"] = sr
return transformed
class STFT:
eps = 1e-4
def __init__(self, n_fft, hop_size):
self.n_fft = n_fft
self.hop_size = hop_size
def __call__(self, dataset, **inputs):
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps)
transformed = dict(inputs)
transformed["stft"] = np.transpose(stft)
return transformed
class AudioFeatures:
eps = 1e-4
def __init__(self, descriptor, verbose=True):
name, *args = descriptor.split("_")
self.feature_type = name
if name == "stft":
n_fft, hop_size = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_features = self.n_fft // 2 + 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing STFT features with params:\n",
"n_fft: {}, hop_size: {}".format(
n_fft, hop_size
)
)
elif name == "mel":
n_fft, hop_size, n_mel = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_mel = int(n_mel)
self.n_features = self.n_mel
self.padding_value = 0.0
if verbose:
print(
"\nUsing mel features with params:\n",
"n_fft: {}, hop_size: {}, n_mel: {}".format(
n_fft, hop_size, n_mel
)
)
elif name == "raw":
self.n_features = 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing raw waveform features."
)
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if self.feature_type == "stft":
# stft = compute_stft(
# inputs["audio"],
# window_size=self.n_fft, hop_size=self.hop_size,
# eps=self.eps, log=True
# )
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "mel":
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps, log=False
)
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "raw":
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
return transformed
class SampleSegment(Augmentation):
def __init__(self, ratio=(0.3, 0.9), p=1.0):
self.min, self.max = ratio
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
original_size = inputs["audio"].size
target_size = int(np.random.uniform(self.min, self.max) * original_size)
start = np.random.randint(original_size - target_size - 1)
transformed["audio"] = inputs["audio"][start:start+target_size]
return transformed
class ShuffleAudio(Augmentation):
def __init__(self, chunk_length=0.5, p=0.5):
self.chunk_length = chunk_length
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = shuffle_audio(
transformed["audio"], self.chunk_length, sr=transformed["sr"])
return transformed
class CutOut(Augmentation):
def __init__(self, area=0.25, p=0.5):
self.area = area
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = cutout(
transformed["audio"], self.area)
return transformed
class SampleLongAudio:
def __init__(self, max_length):
self.max_length = max_length
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if (inputs["audio"].size / inputs["sr"]) > self.max_length:
max_length = self.max_length * inputs["sr"]
start = np.random.randint(0, inputs["audio"].size - max_length)
transformed["audio"] = inputs["audio"][start:start+max_length]
return transformed
class OneOf:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, dataset, **inputs):
transform = random.choice(self.transforms)
        return transform(dataset=dataset, **inputs)
class DropFields:
def __init__(self, fields):
self.to_drop = fields
def __call__(self, dataset, **inputs):
transformed = dict()
for name, input in inputs.items():
if not name in self.to_drop:
transformed[name] = input
return transformed
class RenameFields:
def __init__(self, mapping):
self.mapping = mapping
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
for old, new in self.mapping.items():
transformed[new] = transformed.pop(old)
return transformed
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def switch_off_augmentations(self):
for t in self.transforms:
if isinstance(t, Augmentation):
t.p = 0.0
def __call__(self, dataset=None, **inputs):
for t in self.transforms:
inputs = t(dataset=dataset, **inputs)
return inputs
class Identity:
def __call__(self, dataset=None, **inputs):
return inputs
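# Hedged example (added; not part of the original file): one plausible way to chain these
# transforms for training. The feature descriptor, field names and `class_map` are
# placeholders; MixUp additionally needs the dataset passed in so it can draw a second
# clean sample.
#
#   transform = Compose([
#       LoadAudio(),
#       MapLabels(class_map=class_map),
#       SampleLongAudio(max_length=10),
#       MixUp(p=0.5),
#       AudioFeatures("mel_2048_512_64"),
#       DropFields(("audio", "filename", "sr")),
#       RenameFields({"labels": "target"}),
#   ])
#   transform.switch_off_augmentations()          # e.g. for the validation split
#   sample = transform(dataset=train_set, filename=path, raw_labels=raw_labels)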
|
5005
|
import torch
import torch.nn as nn
class EstimatorCV():
def __init__(self, feature_num, class_num):
super(EstimatorCV, self).__init__()
self.class_num = class_num
self.CoVariance = torch.zeros(class_num, feature_num, feature_num)#.cuda()
self.Ave = torch.zeros(class_num, feature_num)#.cuda()
self.Amount = torch.zeros(class_num)#.cuda()
def update_CV(self, features, labels):
N = features.size(0)
C = self.class_num
A = features.size(1)
NxCxFeatures = features.view(
N, 1, A
).expand(
N, C, A
)
onehot = torch.zeros(N, C)#.cuda()
onehot.scatter_(1, labels.view(-1, 1), 1)
NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A)
features_by_sort = NxCxFeatures.mul(NxCxA_onehot)
Amount_CxA = NxCxA_onehot.sum(0)
Amount_CxA[Amount_CxA == 0] = 1
ave_CxA = features_by_sort.sum(0) / Amount_CxA
var_temp = features_by_sort - \
ave_CxA.expand(N, C, A).mul(NxCxA_onehot)
var_temp = torch.bmm(
var_temp.permute(1, 2, 0),
var_temp.permute(1, 0, 2)
).div(Amount_CxA.view(C, A, 1).expand(C, A, A))
sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A)
sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A)
weight_CV = sum_weight_CV.div(
sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A)
)
weight_CV[weight_CV != weight_CV] = 0
weight_AV = sum_weight_AV.div(
sum_weight_AV + self.Amount.view(C, 1).expand(C, A)
)
weight_AV[weight_AV != weight_AV] = 0
additional_CV = weight_CV.mul(1 - weight_CV).mul(
torch.bmm(
(self.Ave - ave_CxA).view(C, A, 1),
(self.Ave - ave_CxA).view(C, 1, A)
)
)
self.CoVariance = (self.CoVariance.mul(1 - weight_CV) + var_temp
.mul(weight_CV)).detach() + additional_CV.detach()
self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach()
self.Amount += onehot.sum(0)
class ISDALoss(nn.Module):
def __init__(self, feature_num, class_num):
super(ISDALoss, self).__init__()
self.estimator = EstimatorCV(feature_num, class_num)
self.class_num = class_num
self.cross_entropy = nn.CrossEntropyLoss()
def isda_aug(self, fc, features, y, labels, cv_matrix, ratio):
N = features.size(0)
C = self.class_num
A = features.size(1)
weight_m = list(fc.parameters())[0]
NxW_ij = weight_m.expand(N, C, A)
NxW_kj = torch.gather(NxW_ij,
1,
labels.view(N, 1, 1)
.expand(N, C, A))
CV_temp = cv_matrix[labels]
# sigma2 = ratio * \
# torch.bmm(torch.bmm(NxW_ij - NxW_kj,
# CV_temp).view(N * C, 1, A),
# (NxW_ij - NxW_kj).view(N * C, A, 1)).view(N, C)
sigma2 = ratio * \
torch.bmm(torch.bmm(NxW_ij - NxW_kj,
CV_temp),
(NxW_ij - NxW_kj).permute(0, 2, 1))
sigma2 = sigma2.mul(torch.eye(C)#.cuda()
.expand(N, C, C)).sum(2).view(N, C)
aug_result = y + 0.5 * sigma2
return aug_result
def forward(self, model, fc, x, target_x, ratio):
features = model(x)
y = fc(features)
self.estimator.update_CV(features.detach(), target_x)
isda_aug_y = self.isda_aug(fc, features, y, target_x, self.estimator.CoVariance.detach(), ratio)
loss = self.cross_entropy(isda_aug_y, target_x)
return loss, y
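# Hedged usage sketch (added; not part of the original file): the loss expects a feature
# extractor (`model`, returning (N, feature_num) features) and a separate linear head
# (`fc`); `ratio` is typically annealed from 0 towards a maximum value over training.
# All names below are placeholders.
#
#   fc = nn.Linear(feature_num, class_num)
#   criterion = ISDALoss(feature_num, class_num)
#   for epoch in range(num_epochs):
#       ratio = lambda_0 * epoch / num_epochs
#       loss, logits = criterion(feature_extractor, fc, x, target, ratio)
#       opt.zero_grad(); loss.backward(); opt.step()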
|
5017
|
from functools import partial
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
WebDriverWait
)
def esperar_elemento(elemento, webdriver):
    print(f'Trying to find "{elemento}"')
if webdriver.find_elements_by_css_selector(elemento):
return True
return False
esperar_botao = partial(esperar_elemento, 'button')
esperar_sucesso = partial(esperar_elemento, '#finished')
url = 'https://selenium.dunossauro.live/aula_09_a.html'
driver = Firefox()
wdw = WebDriverWait(driver, 10)
driver.get(url)
wdw.until(esperar_botao, 'Something went wrong')
driver.find_element_by_css_selector('button').click()
wdw.until(
esperar_sucesso,
    'The success message did not appear'
)
sucesso = driver.find_element_by_css_selector('#finished')
assert sucesso.text == 'Carregamento concluído'
|
5039
|
import logging
from web3 import Web3
import sys
import time
import meditation.meditation as meditation
if __name__ == "__main__":
log_format = '%(asctime)s|%(name)s|%(levelname)s: %(message)s'
logger = logging.getLogger("DFK-meditation")
logger.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.INFO, format=log_format, stream=sys.stdout)
rpc_server = 'https://api.harmony.one'
logger.info("Using RPC server " + rpc_server)
private_key = None # set private key
account_address = '0x2E7669F61eA77F02445A015FBdcFe2DE47083E02'
gas_price_gwei = 10
tx_timeout_seconds = 30
w3 = Web3(Web3.HTTPProvider(rpc_server))
active_meditations = meditation.get_active_meditations(account_address, rpc_server)
logger.info("Pending meditation on address " + str(account_address) + ": "+str(active_meditations))
level = 1
hero_id = 1
required_runes = meditation.get_required_runes(level, rpc_server)
meditation.start_meditation(1, meditation.stat2id('strength'), meditation.stat2id('endurance'), meditation.stat2id('luck'),
meditation.ZERO_ADDRESS, private_key, w3.eth.getTransactionCount(account_address),
gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
hero_meditation = meditation.get_hero_meditation(hero_id, rpc_server)
logger.info("Pending meditation "+str(hero_meditation))
time.sleep(5)
meditation.complete_meditation(hero_id, private_key, w3.eth.getTransactionCount(account_address),
gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
|
5042
|
import pygame
import random
pygame.init()
clock = pygame.time.Clock()
fps = 60
#game window
bottom_panel = 150
screen_width = 800
screen_height = 400 + bottom_panel
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Battle')
#define game variables
current_fighter = 1
total_fighters = 3
action_cooldown = 0
action_wait_time = 90
attack = False
potion = False
clicked = False
#define fonts
font = pygame.font.SysFont('Times New Roman', 26)
#define colours
red = (255, 0, 0)
green = (0, 255, 0)
#load images
#background image
background_img = pygame.image.load('img/Background/background.png').convert_alpha()
#panel image
panel_img = pygame.image.load('img/Icons/panel.png').convert_alpha()
#sword image
sword_img = pygame.image.load('img/Icons/sword.png').convert_alpha()
#create function for drawing text
def draw_text(text, font, text_col, x, y):
img = font.render(text, True, text_col)
screen.blit(img, (x, y))
#function for drawing background
def draw_bg():
screen.blit(background_img, (0, 0))
#function for drawing panel
def draw_panel():
#draw panel rectangle
screen.blit(panel_img, (0, screen_height - bottom_panel))
#show knight stats
draw_text(f'{knight.name} HP: {knight.hp}', font, red, 100, screen_height - bottom_panel + 10)
for count, i in enumerate(bandit_list):
#show name and health
draw_text(f'{i.name} HP: {i.hp}', font, red, 550, (screen_height - bottom_panel + 10) + count * 60)
#fighter class
class Fighter():
def __init__(self, x, y, name, max_hp, strength, potions):
self.name = name
self.max_hp = max_hp
self.hp = max_hp
self.strength = strength
self.start_potions = potions
self.potions = potions
self.alive = True
self.animation_list = []
self.frame_index = 0
        self.action = 0  # 0:idle, 1:attack, 2:hurt, 3:dead
self.update_time = pygame.time.get_ticks()
#load idle images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Idle/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
#load attack images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Attack/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
self.image = self.animation_list[self.action][self.frame_index]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
def update(self):
animation_cooldown = 100
#handle animation
#update image
self.image = self.animation_list[self.action][self.frame_index]
#check if enough time has passed since the last update
if pygame.time.get_ticks() - self.update_time > animation_cooldown:
self.update_time = pygame.time.get_ticks()
self.frame_index += 1
#if the animation has run out then reset back to the start
if self.frame_index >= len(self.animation_list[self.action]):
self.idle()
def idle(self):
        #set variables to idle animation
self.action = 0
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def attack(self, target):
#deal damage to enemy
rand = random.randint(-5, 5)
damage = self.strength + rand
target.hp -= damage
#check if target has died
if target.hp < 1:
target.hp = 0
target.alive = False
#set variables to attack animation
self.action = 1
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def draw(self):
screen.blit(self.image, self.rect)
class HealthBar():
def __init__(self, x, y, hp, max_hp):
self.x = x
self.y = y
self.hp = hp
self.max_hp = max_hp
def draw(self, hp):
#update with new health
self.hp = hp
#calculate health ratio
ratio = self.hp / self.max_hp
pygame.draw.rect(screen, red, (self.x, self.y, 150, 20))
pygame.draw.rect(screen, green, (self.x, self.y, 150 * ratio, 20))
knight = Fighter(200, 260, 'Knight', 30, 10, 3)
bandit1 = Fighter(550, 270, 'Bandit', 20, 6, 1)
bandit2 = Fighter(700, 270, 'Bandit', 20, 6, 1)
bandit_list = []
bandit_list.append(bandit1)
bandit_list.append(bandit2)
knight_health_bar = HealthBar(100, screen_height - bottom_panel + 40, knight.hp, knight.max_hp)
bandit1_health_bar = HealthBar(550, screen_height - bottom_panel + 40, bandit1.hp, bandit1.max_hp)
bandit2_health_bar = HealthBar(550, screen_height - bottom_panel + 100, bandit2.hp, bandit2.max_hp)
run = True
while run:
clock.tick(fps)
#draw background
draw_bg()
#draw panel
draw_panel()
knight_health_bar.draw(knight.hp)
bandit1_health_bar.draw(bandit1.hp)
bandit2_health_bar.draw(bandit2.hp)
#draw fighters
knight.update()
knight.draw()
for bandit in bandit_list:
bandit.update()
bandit.draw()
#control player actions
#reset action variables
attack = False
potion = False
target = None
#make sure mouse is visible
pygame.mouse.set_visible(True)
pos = pygame.mouse.get_pos()
for count, bandit in enumerate(bandit_list):
if bandit.rect.collidepoint(pos):
#hide mouse
pygame.mouse.set_visible(False)
#show sword in place of mouse cursor
screen.blit(sword_img, pos)
if clicked == True:
attack = True
target = bandit_list[count]
#player action
if knight.alive == True:
if current_fighter == 1:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#look for player action
#attack
if attack == True and target != None:
knight.attack(target)
current_fighter += 1
action_cooldown = 0
#enemy action
for count, bandit in enumerate(bandit_list):
if current_fighter == 2 + count:
if bandit.alive == True:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#attack
bandit.attack(knight)
current_fighter += 1
action_cooldown = 0
else:
current_fighter += 1
#if all fighters have had a turn then reset
if current_fighter > total_fighters:
current_fighter = 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
else:
clicked = False
pygame.display.update()
pygame.quit()
|
5048
|
from .supervise import *
def get_losses(name, **kwargs):
name = name.lower()
if name == 'rhloss':
loss = RHLoss(**kwargs)
elif name == 'xtloss':
loss = XTLoss(**kwargs)
else:
raise NotImplementedError('Loss [{:s}] is not supported.'.format(name))
return loss
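# Hedged example (added; not part of the original file): the factory lower-cases the name
# and forwards any keyword arguments to the loss constructor; the constructor signatures
# of RHLoss/XTLoss are not visible here, so kwargs are left out of the sketch.
#
#   criterion = get_losses('RHLoss')      # equivalent to RHLoss()
#   # criterion = get_losses('xtloss', **xt_kwargs)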
|
5077
|
from stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator
from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers
from stix_shifter_modules.aws_athena.entry_point import EntryPoint
import unittest
MODULE = "aws_athena"
entry_point = EntryPoint()
map_data = entry_point.get_results_translator().map_data
data_source = {
"type": "identity",
"id": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"name": "aws_athena",
"identity_class": "events"
}
options = {}
class TestAwsResultsToStix(unittest.TestCase):
"""
    Unit tests for translating AWS Athena results to STIX
"""
@staticmethod
def get_first(itr, constraint):
"""
        Return the first object in itr for which constraint(obj) is true, else None
"""
return next(
(obj for obj in itr if constraint(obj)),
None
)
@staticmethod
def get_first_of_type(itr, typ):
"""
        Return the first object in itr whose STIX 'type' matches typ
"""
return TestAwsResultsToStix.get_first(itr, lambda o: isinstance(o, dict) and o.get('type') == typ)
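    # e.g. get_first_of_type(objects.values(), 'network-traffic') returns the
    # first dict with type == 'network-traffic', or None if no such object exists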
def test_common_prop(self):
"""
to test the common stix object properties
"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "\u00d6rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/"
"7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1. "
"Brute force attacks are used to gain unauthorized access to your instance by "
"guessing the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
assert result_bundle['type'] == 'bundle'
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
assert result_bundle_identity['id'] == data_source['id']
assert result_bundle_identity['name'] == data_source['name']
assert result_bundle_identity['identity_class'] == data_source['identity_class']
observed_data = result_bundle_objects[1]
assert observed_data['id'] is not None
assert observed_data['type'] == "observed-data"
assert observed_data['created_by_ref'] == result_bundle_identity['id']
assert observed_data['created'] is not None
assert observed_data['modified'] is not None
assert observed_data['number_observed'] is not None
def test_vpc_flow_network_json_to_stix(self):
"""to test network stix object properties"""
data = {
"vpcflow": {
"account": 979326520502,
"interfaceid": "eni-04b762de832716892",
"sourceaddress": "192.168.127.12",
"destinationaddress": "172.31.62.249",
"sourceport": 58387,
"destinationport": 51289,
"protocol": "tcp",
"starttime": 1592547796,
"endtime": 1592547798,
"action": "REJECT",
"date": "2020-06-19",
"logstatus": "OK",
"numbytes": 40,
"region": "us-east-1",
"version": 2
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'src_ref', 'dst_ref', 'src_port', 'dst_port', 'protocols', 'start', 'end'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['src_ref'] == '1'
assert network_obj['dst_ref'] == '4'
assert network_obj['src_port'] == 58387
assert network_obj['dst_port'] == 51289
assert network_obj['protocols'] == ['tcp']
assert network_obj['start'] == '2020-06-19T06:23:16.000Z'
assert network_obj['end'] == '2020-06-19T06:23:18.000Z'
def test_vpc_flow_custom_attr_json_to_stix(self):
"""to test network stix object properties"""
data = {
"vpcflow": {
"account": 979326520502,
"interfaceid": "eni-04b762de832716892",
"sourceaddress": "192.168.127.12",
"destinationaddress": "172.31.62.249",
"sourceport": 58387,
"destinationport": 51289,
"protocol": "tcp",
"starttime": 1592547796,
"endtime": 1592547798,
"action": "REJECT",
"date": "2020-06-19",
"logstatus": "OK",
"numbytes": 40,
"region": "us-east-1",
"version": 2
}
}
options = {"unmapped_fallback": True}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena')
assert custom_object.keys() == {'type', 'interfaceid', 'date', 'logstatus', 'numbytes', 'region', 'version'}
assert custom_object['date'] == '2020-06-19'
assert custom_object['logstatus'] == 'OK'
assert custom_object['numbytes'] == 40
assert custom_object['region'] == 'us-east-1'
assert custom_object['version'] == 2
def test_guardduty_network_json_to_stix(self):
"""to test network stix object properties"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding"
"/7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1. "
"Brute force attacks are used to gain unauthorized access to your instance by "
"guessing the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "<NAME>"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'dst_port', 'src_ref', 'dst_ref', 'src_port', 'protocols'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['dst_port'] == 38420
assert network_obj['src_ref'] == '3'
assert network_obj['dst_ref'] == '9'
assert network_obj['src_port'] == 22
assert network_obj['protocols'] == ['tcp']
def test_guardduty_custom_attr_json_to_stix(self):
"""to test network stix object properties"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/"
"7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1."
" Brute force attacks are used to gain unauthorized access to your instance by guessing "
"the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
options = {"unmapped_fallback": True}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena')
assert custom_object.keys() == {'type', 'service_action_networkconnectionaction_remoteipdetails_country_countryname',
'finding_id', 'arn', 'createdat', 'partition', 'resource',
'schemaversion', 'service', 'updatedat'}
assert custom_object['arn'] == 'arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed' \
'494f3b7ca56acdc74df/finding/7ab9d1cb6248e05a0e419a79528761cb'
assert custom_object['finding_id'] == '7ab9d1cb6248e05a0e419a79528761cb'
assert custom_object['createdat'] == '2020-07-31T06:37:13.745Z'
assert custom_object['partition'] == 'aws'
assert custom_object['schemaversion'] == 2.0
assert custom_object['updatedat'] == '2020-09-12T09:25:34.086Z'
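if __name__ == '__main__':
    # convenience entry point so this test module can be run directly
    unittest.main()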
|
5112
|
from llvmlite import ir
import xml.etree.ElementTree as et
int32 = ir.IntType(32)
int64 = ir.IntType(64)
int1 = ir.IntType(1)
void_type = ir.VoidType()
function_names = []
registers, functions, uniques, extracts = {}, {}, {}, {}
internal_functions = {}
memory = {}
flags = ["ZF", "CF", "OF", "SF"]
pointers = ["RSP", "RIP", "RBP", "EBP", "ESP"]
def lift(filename):
root = et.parse(filename).getroot()
module = ir.Module(name="lifted")
for register in root.find('globals').findall('register'):
if register.get('name') in flags:
var = ir.GlobalVariable(module, ir.IntType(1), register.get('name'))
var.initializer = ir.Constant(ir.IntType(1), None)
var.linkage = 'internal'
registers[register.get('name')] = var
elif register.get('name') in pointers:
var = ir.GlobalVariable(module, ir.PointerType(ir.IntType(8)), register.get('name'))
var.initializer = ir.Constant(ir.PointerType(ir.IntType(8)), None)
var.linkage = 'internal'
registers[register.get('name')] = var
else:
var = ir.GlobalVariable(module, ir.IntType(8 * int(register.get('size'))), register.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(register.get('size'))), None)
var.linkage = 'internal'
registers[register.get('name')] = var
for memory_location in root.find('memory').findall('memory'):
var = ir.GlobalVariable(module, ir.IntType(8 * int(memory_location.get('size'))), memory_location.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(memory_location.get('size'))), None)
var.linkage = 'internal'
memory[memory_location.get('name')] = var
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "intra_function_branch")
internal_functions["intra_function_branch"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "call_indirect")
internal_functions["call_indirect"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "bit_extraction")
internal_functions["bit_extraction"] = ir_func
for function in root.findall('function'):
name = function.get('name')
x = 1
while name in function_names:
name = name + "_" + str(x)
x += 1
function_names.append(name)
address = function.get('address')
functions[address] = [build_function(name, module), function]
for address in functions:
ir_func, function = functions[address]
populate_func(ir_func, function)
return module
def populate_func(ir_func, function):
builders, blocks = build_cfg(function, ir_func)
if blocks == {}:
return
populate_cfg(function, builders, blocks)
def build_function(name, module):
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, name)
return ir_func
def build_cfg(function, ir_func):
builders, blocks = {}, {}
instructions = function.find("instructions")
if instructions:
block = ir_func.append_basic_block("entry")
blocks["entry"] = block
builders["entry"] = ir.IRBuilder(block)
for instruction in instructions:
address = instruction.find("address").text
block = ir_func.append_basic_block(address)
blocks[address] = block
builders[address] = ir.IRBuilder(block)
return builders, blocks
# noinspection DuplicatedCode
def populate_cfg(function, builders, blocks):
builder = builders["entry"]
stack_size = 10 * 1024 * 1024
stack = builder.alloca(ir.IntType(8), stack_size, name="stack")
stack_top = builder.gep(stack, [ir.Constant(int64, stack_size - 8)], name="stack_top")
builder.store(stack_top, registers["RSP"])
builder.branch(list(blocks.values())[1])
block_iterator = 1
instr = 0
quiter = False
for instruction in function.find("instructions"):
if quiter:
break
address = instruction.find("address").text
if address in builders:
builder = builders[address]
pcodes = instruction.find("pcodes")
pc = 0
no_branch = True
for pcode in pcodes:
pc += 1
mnemonic = pcode.find("name")
if mnemonic.text == "COPY":
output = pcode.find("output")
if output.text in flags and pcode.find("input_0").get("storage") == "constant":
source = ir.Constant(ir.IntType(1), int(pcode.find("input_0").text, 0))
else:
source = fetch_input_varnode(builder, pcode.find("input_0"))
update_output(builder, pcode.find("output"), source)
elif mnemonic.text == "LOAD":
input_1 = pcode.find("input_1")
output = pcode.find("output")
rhs = fetch_input_varnode(builder, input_1)
if input_1.get("storage") == "unique" and output.get("storage") == "unique":
# This is incorrect. This is treating it as a copy, should load the memory address in the input 1
update_output(builder, output, rhs)
else:
if input_1.text in pointers:
rhs = builder.gep(rhs, [ir.Constant(int64, 0)])
result = builder.load(rhs)
update_output(builder, output, result)
elif mnemonic.text == "STORE":
input_1 = pcode.find("input_1") # target
input_2 = pcode.find("input_2") # source
rhs = fetch_input_varnode(builder, input_2)
lhs = fetch_output_varnode(input_1)
lhs2 = builder.gep(lhs, [ir.Constant(int64, 0)])
if lhs2.type != rhs.type.as_pointer():
lhs2 = builder.bitcast(lhs2, rhs.type.as_pointer())
builder.store(rhs, lhs2)
elif mnemonic.text == "BRANCH":
value = pcode.find("input_0").text[2:-2]
if value in functions:
target = functions[value][0]
builder.call(target, [])
elif value in blocks:
target = blocks[value]
builder.branch(target)
no_branch = False
else:
# weird jump into some label in another function
# might be solved with callbr instruction?
builder.call(internal_functions["intra_function_branch"], [])
elif mnemonic.text == "CBRANCH":
true_target = blocks[pcode.find("input_0").text[2:-2]]
false_target = list(blocks.values())[block_iterator + 1]
condition = fetch_input_varnode(builder, pcode.find("input_1"))
no_branch = False
builder.cbranch(condition, true_target, false_target)
elif mnemonic.text == "BRANCHIND":
no_branch = False
target = fetch_input_varnode(builder, pcode.find("input_0"))
if not target.type.is_pointer:
target = builder.inttoptr(target, target.type.as_pointer())
builder.branch_indirect(target)
elif mnemonic.text == "CALL":
target = functions[pcode.find("input_0").text[2:-2]][0]
builder.call(target, [])
elif mnemonic.text == "CALLIND":
# target = pcode.find("input_0").text[2:-2]
builder.call(internal_functions["call_indirect"], [])
elif mnemonic.text == "USERDEFINED":
raise Exception("Not implemented")
elif mnemonic.text == "RETURN":
input_1 = pcode.find("input_1")
no_branch = False
if input_1 is None:
builder.ret_void()
else:
raise Exception("Return value being passed")
elif mnemonic.text == "PIECE":
raise Exception("PIECE operation needs to be tested")
elif mnemonic.text == "SUBPIECE":
output = pcode.find("output")
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
if input_1.text == "0x0":
val = fetch_input_varnode(builder, input_0)
result = builder.trunc(val, ir.IntType(int(output.get("size")) * 8))
update_output(builder, output, result)
else:
builder.call(internal_functions['bit_extraction'], [])
elif mnemonic.text == "INT_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('==', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NOTEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('!=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESSEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_ZEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.zext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.sext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_ADD":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.add(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SUB":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, -int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.sub(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_CARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.uadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SCARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SBORROW":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_2COMP":
val = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.not_(val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NEGATE":
val = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.neg(val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_LEFT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.shl(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_RIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.lshr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SRIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.ashr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_MULT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.mul(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_DIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.div(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_REM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.urem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SDIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.sdiv(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SREM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.srem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "BOOL_NEGATE":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.neg(lhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "FLOAT_EQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NOTEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESSEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ADD":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SUB":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_MULT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_DIV":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NEG":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ABS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SQRT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_CEIL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_FLOOR":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ROUND":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NAN":
raise Exception("Not implemented")
elif mnemonic.text == "INT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "TRUNC":
raise Exception("Not implemented")
elif mnemonic.text == "CPOOLREF":
raise Exception("Not implemented")
elif mnemonic.text == "NEW":
raise Exception("Not implemented")
elif mnemonic.text == "MULTIEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "INDIRECT":
raise Exception("Not implemented")
elif mnemonic.text == "PTRADD":
raise Exception("Not implemented")
elif mnemonic.text == "PTRSUB":
raise Exception("Not implemented")
elif mnemonic.text == "CAST":
raise Exception("Not implemented")
else:
raise Exception("Not a standard pcode instruction")
block_iterator += 1
instr += 1
if block_iterator < len(blocks) and no_branch:
builder.branch(list(blocks.values())[block_iterator])
def fetch_input_varnode(builder, name):
var_type = name.get("storage")
var_size = int(name.get("size")) * 8
if var_type == "register":
return builder.load(registers[name.text])
elif var_type == "unique":
        if name.text not in uniques:
raise Exception("Temporary variable referenced before defined")
return uniques[name.text]
elif var_type == "constant":
var = ir.Constant(ir.IntType(var_size), int(name.text, 0))
return var
elif var_type == "memory":
return memory[name.text]
def update_output(builder, name, output):
var_type = name.get("storage")
if var_type == "register":
reg = registers[name.text]
if reg.type != output.type.as_pointer():
reg = builder.bitcast(reg, output.type.as_pointer())
builder.store(output, reg)
elif var_type == "unique":
uniques[name.text] = output
def fetch_output_varnode(name):
var_type = name.get("storage")
if var_type == "register":
return registers[name.text]
elif var_type == "unique":
if name.text not in uniques:
uniques[name.text] = None
return uniques[name.text]
def int_check_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs2 = lhs
lhs = builder.ptrtoint(lhs, target)
if lhs2 == rhs:
rhs = lhs
if rhs.type != target and lhs != rhs:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
return lhs, rhs
def check_shift_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, target)
else:
lhs = builder.zext(lhs, target)
if rhs.type != target:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
else:
rhs = builder.zext(rhs, target)
return lhs, rhs
def int_comparison_check_inputs(builder, lhs, rhs):
# For integer comparison operations. We assume rhs is the correct type.
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, rhs.type)
return lhs, rhs
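# minimal usage sketch: the XML filename below is hypothetical and must be an
# export produced for this lifter (registers, memory, functions and pcodes)
if __name__ == '__main__':
    lifted_module = lift('program.xml')
    # an llvmlite ir.Module renders as textual LLVM IR when printed
    print(lifted_module)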
|
5123
|
from shovel import task
@task
def hello(name='Foo'):
'''Prints "Hello, " followed by the provided name.
Examples:
shovel bar.hello
shovel bar.hello --name=Erin
http://localhost:3000/bar.hello?Erin'''
print('Hello, %s' % name)
@task
def args(*args):
'''Echos back all the args you give it.
This exists mostly to demonstrate the fact that shovel
is compatible with variable argument functions.
Examples:
shovel bar.args 1 2 3 4
http://localhost:3000/bar.args?1&2&3&4'''
for arg in args:
print('You said "%s"' % arg)
@task
def kwargs(**kwargs):
'''Echos back all the kwargs you give it.
This exists mostly to demonstrate that shovel is
compatible with the keyword argument functions.
Examples:
shovel bar.kwargs --foo=5 --bar 5 --howdy hey
http://localhost:3000/bar.kwargs?foo=5&bar=5&howdy=hey'''
for key, val in kwargs.items():
print('You said "%s" => "%s"' % (key, val))
|
5132
|
from datetime import datetime
with open('/home/neo4j/neo4j-community-3.5.1/logs/debug.log', 'r') as log:
begin = []
end = []
for line in log:
if 'Index population started' in line:
begin.append(line[:23])
elif 'Index creation finished' in line:
end.append(line[:23])
if len(begin) == 0 or len(begin) > 9:
print("Something went wrong. Please check debug.log")
elif len(begin) != len(end):
print("{}/{} Done. Please come back later.".format(len(end), len(begin)))
else:
elapsed_time = 0
        for i in range(len(begin)):
begin_tmp = datetime.strptime(begin[i], '%Y-%m-%d %H:%M:%S.%f')
end_tmp = datetime.strptime(end[i],'%Y-%m-%d %H:%M:%S.%f')
elapsed_time += (end_tmp-begin_tmp).total_seconds()
print("Done in {} s".format(elapsed_time))
|
5161
|
import _winreg
import os
def get_shared_cache_folder():
"""
Look in the registry for the configured cache folder.
    If there is no entry, return None.
:return:
"""
_winreg.aReg = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)
try:
key = _winreg.OpenKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
path, _ = _winreg.QueryValueEx(key, "CACHEFOLDER")
except OSError:
return None
return path
def set_shared_cache_folder(folder_path):
if not os.path.isdir(folder_path):
try:
os.makedirs(folder_path)
except OSError:
raise ValueError("Could not create directory {}".format(folder_path))
folder_path = os.path.normpath(folder_path) + os.sep
key_eveonline = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
_winreg.SetValueEx(key_eveonline, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
key_eveprobe = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEPROBE")
_winreg.SetValueEx(key_eveprobe, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
def get_index_path(hint):
return hint
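# illustrative usage on Windows (the folder path is hypothetical):
#   set_shared_cache_folder(r"C:\EVE\SharedCache")
#   print(get_shared_cache_folder())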
|
5171
|
from xagents import a2c, acer, ddpg, dqn, ppo, td3, trpo
from xagents.a2c.agent import A2C
from xagents.acer.agent import ACER
from xagents.base import OffPolicy
from xagents.ddpg.agent import DDPG
from xagents.dqn.agent import DQN
from xagents.ppo.agent import PPO
from xagents.td3.agent import TD3
from xagents.trpo.agent import TRPO
from xagents.utils.cli import play_args, train_args, tune_args
from xagents.utils.common import register_models
__author__ = 'schissmantics'
__email__ = '<EMAIL>'
__license__ = 'MIT'
__version__ = '1.0.1'
agents = {
'a2c': {'module': a2c, 'agent': A2C},
'acer': {'module': acer, 'agent': ACER},
'dqn': {'module': dqn, 'agent': DQN},
'ppo': {'module': ppo, 'agent': PPO},
'td3': {'module': td3, 'agent': TD3},
'trpo': {'module': trpo, 'agent': TRPO},
'ddpg': {'module': ddpg, 'agent': DDPG},
}
register_models(agents)
commands = {
'train': (train_args, 'fit', 'Train given an agent and environment'),
'play': (
play_args,
'play',
'Play a game given a trained agent and environment',
),
'tune': (
tune_args,
'',
'Tune hyperparameters given an agent, hyperparameter specs, and environment',
),
}
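# illustrative registry lookups (keys come from the dicts defined above):
#   agent_cls = agents['ppo']['agent']                    # -> PPO
#   arg_spec, method_name, description = commands['train']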
|
5184
|
from exchange_sockets.exchange_websocket import ExchangeWebSocket
from singletones.custom_logger import MyLogger
import websocket
import threading
from time import sleep
from time import time
import json
import ssl
logger = MyLogger()
class BitstampWebsocket(ExchangeWebSocket):
def __init__(self, pairs_n_streams):
super().__init__('Bitstamp', pairs_n_streams)
self.possible_streams = ['live_trades', 'diff_order_book']
self.streams = []
def init_streams(self):
for pair, streams in self.pairs_n_streams.items():
for sub_stream in streams.split(','):
if self.has_stream(sub_stream):
cur = dict()
cur['event'] = 'bts:subscribe'
cur['data'] = {'channel': "{}_{}".format(sub_stream, pair)}
self.streams.append(cur)
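                    # each appended entry follows Bitstamp's subscribe shape, e.g.
                    # {'event': 'bts:subscribe', 'data': {'channel': 'live_trades_btcusd'}}
                    # where 'btcusd' stands in for whatever pair was configured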
def start_multiple_websocket(self, init_streams=True):
super().start_multiple_websocket(init_streams=init_streams)
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("wss://ws.bitstamp.net",
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE}))
self.wst.daemon = True
self.wst.start()
logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = 15
        while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
logger.error("%s Couldn't connect to %s! Exiting.",
self.node,
self.exchange)
self.close_socket()
else:
logger.info('{} socket is started:\n{}\n{}'.format(self.exchange,
self.node,
str(self.streams)))
def save_trades(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
append_data = "{},{},{},{}\n".format(data['timestamp'],
data['price'],
data['amount'],
data['type'])
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
append_data)
def save_level2_orderbook(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
all_data = {}
data_time = data['timestamp']
for side in ['bids', 'asks']:
for cur in data[side]:
if not all_data.get(symbol, None):
all_data[symbol] = []
price = cur[0]
size = cur[1]
all_data[symbol].append("{},{},{}\n".format(
data_time,
price,
size if side == "bids" else "-{}".format(size)))
for symbol, l2_ob_data in all_data.items():
for l2_ob in l2_ob_data:
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
l2_ob)
def __on_message(self, ws, message):
if message is None:
return
try:
self.last_msg_time = int(time())
message = json.loads(message)
channel = message['channel']
if channel.startswith('diff_order_book'):
self.save_level2_orderbook(message)
elif channel.startswith('live_trades'):
self.save_trades(message)
except Exception as e:
logger.debug(str(e))
def __on_error(self, ws, error):
self.on_error = True
logger.error("On error\n{}\n{} {}".format(self.node,
self.exchange,
error))
def __on_close(self, ws):
logger.info("On close\n{}".format(self.exchange))
def __on_open(self, ws):
logger.info("On Open\n{}".format(self.exchange))
if self.streams:
for stream in self.streams:
logger.info('Subscribing to %s', json.dumps(stream))
self.ws.send(json.dumps(stream))
sleep(2)
else:
logger.error('%s. Stream is not initialized', self.exchange)
def close_socket(self):
self.exited = True
if self.ws:
self.ws.close()
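# minimal usage sketch (the pair-to-streams mapping is hypothetical; keys are
# pairs and values are comma-separated stream names from possible_streams):
#   ws = BitstampWebsocket({'btcusd': 'live_trades,diff_order_book'})
#   ws.start_multiple_websocket()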
|
5211
|
from copy import copy, deepcopy
import sqlite3
from hashlib import md5
import time
import os
import os.path as osp
from base64 import b64encode, b64decode
from zlib import compress, decompress
import itertools as it
import logging
# instead of pickle we use dill, so we can save dynamically defined
# classes
import dill
from wepy.sim_manager import Manager
from wepy.orchestration.configuration import Configuration
from wepy.orchestration.snapshot import SimApparatus, SimSnapshot
from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri
class OrchestratorError(Exception):
""" """
pass
class Orchestrator():
""" """
# we freeze the pickle protocol for making hashes, because we care
# more about stability than efficiency of newer versions
HASH_PICKLE_PROTOCOL = 3
DEFAULT_WORKDIR = Configuration.DEFAULT_WORKDIR
DEFAULT_CONFIG_NAME = Configuration.DEFAULT_CONFIG_NAME
DEFAULT_NARRATION = Configuration.DEFAULT_NARRATION
DEFAULT_MODE = Configuration.DEFAULT_MODE
DEFAULT_CHECKPOINT_FILENAME = "checkpoint.orch.sqlite"
ORCH_FILENAME_TEMPLATE = "{config}{narration}.orch.sqlite"
    # the default way to open up the whole parent database
DEFAULT_ORCHESTRATION_MODE = 'x'
# mode to open the individual kv stores on the parent database
KV_MODE = 'r+'
# default timeout for connecting to a database
SQLITE3_DEFAULT_TIMEOUT = 5
# the fields to return (and their order) as a record for a run
# query
RUN_SELECT_FIELDS = ('last_cycle_idx', 'config_hash')
def __init__(self, orch_path=None,
mode='x',
append_only=False,
):
self._mode = mode
self._append_only = append_only
# handle the path and convert to a proper URI for the database
# given the path and the mode
self._db_uri = gen_uri(orch_path, mode)
# run table: start_hash, end_hash, num_cycles, configuration_id
# get a raw connection to the database
self._db = sqlite3.connect(self.db_uri, uri=True,
timeout=self.SQLITE3_DEFAULT_TIMEOUT)
self._closed = False
# set isolation level to autocommit
self._db.isolation_level = None
        # we can use read_uncommitted only in append_only mode (no
        # updates) because you never have to worry about dirty reads
        # since you can't update
        if self.append_only:
            self._db.execute("PRAGMA read_uncommitted=1")
# we make a table for the run data, if it doesn't already
# exist
c = self._db.cursor().execute(self.create_run_table_query)
# initialize or open each of the separate KV-stores (tables in
# the same SQLite3 database)
# change the mode for the KV stores since we already created the database
# metadata: default init walkers, default apparatus, default
# configuration
self.metadata_kv = KV(db_url=self.db_uri,
table='meta',
mode='a',
value_types=None,
append_only=self.append_only)
# snapshots
self.snapshot_kv = KV(db_url=self.db_uri,
table='snapshots',
primary_key='snaphash',
value_name='snapshot',
mode='a',
append_only=self.append_only)
# configurations
self.configuration_kv = KV(db_url=self.db_uri,
table='configurations',
primary_key='config_hash',
value_name='config',
mode='a',
append_only=self.append_only)
@property
def mode(self):
return self._mode
@property
def append_only(self):
return self._append_only
def close(self):
        if self._closed:
raise IOError("The database connection is already closed")
else:
# close all the connections
self.metadata_kv.close()
self.configuration_kv.close()
self.snapshot_kv.close()
self._db.close()
self._closed = True
@property
def db_uri(self):
return self._db_uri
@property
def orch_path(self):
# if it is not an in-memory database we parse off the path and
# return that
if self.db_uri == SQLITE3_INMEMORY_URI:
return None
else:
# URIs have the following form: protocol:url?query
# destructure the URI
_, tail = self.db_uri.split(':')
if len(tail.split('?')) > 1:
url, _ = tail.split('?')
else:
url = tail
return url
@classmethod
def serialize(cls, snapshot):
"""Serialize a snapshot to a compressed, encoded, pickle string
representation.
Currently uses the dill module for pickling because the base
pickle module is inadequate. However, it is mostly compatible
and can be read natively with pickle but this usage is
        officially not supported. Instead use the deserialize method.
Also compresses with default zlib compression and is encoded
in base64.
The object will always have a deepcopy performed on it so that
all of the extraneous references to it are avoided since there
is no (AFAIK) way to make sure all references to an object are
deleted.
NOTE: Perhaps there is a way and that should be done (and
tested) to see if it provides stable pickles (i.e. pickles
that always hash to the same value). To avoid the overhead of
copying large objects.
Parameters
----------
snapshot : SimSnapshot object
The snapshot of the simulation you want to serialize.
Returns
-------
serial_str : str
Serialized string of the snapshot object
"""
serial_str = b64encode(
compress(
dill.dumps(
deepcopy(snapshot),
protocol=cls.HASH_PICKLE_PROTOCOL,
recurse=True)
)
)
return serial_str
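    # round-trip note: Orchestrator.deserialize(Orchestrator.serialize(obj))
    # yields an equivalent copy of obj, since deserialize inverts each step
    # (base64 decode, zlib decompress, dill load)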
# core methods for serializing python objects, used for snapshots,
# apparatuses, configurations, and the initial walker list
@classmethod
def deserialize(cls, serial_str):
"""Deserialize an unencoded string snapshot to an object.
Parameters
----------
serial_str : str
Serialized string of the snapshot object
Returns
-------
snapshot : SimSnapshot object
Simulation snapshot object
"""
return dill.loads(decompress(b64decode(serial_str)))
# defaults getters and setters
def set_default_sim_apparatus(self, sim_apparatus):
# serialize the apparatus and then set it
serial_app = self.serialize(sim_apparatus)
self.metadata_kv['default_sim_apparatus'] = serial_app
def set_default_init_walkers(self, init_walkers):
# serialize the apparatus and then set it
serial_walkers = self.serialize(init_walkers)
self.metadata_kv['default_init_walkers'] = serial_walkers
def set_default_configuration(self, configuration):
# serialize the apparatus and then set it
serial_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serial_config)
self.metadata_kv['default_configuration_hash'] = config_hash
self.configuration_kv[config_hash] = serial_config
def set_default_snapshot(self, snapshot):
snaphash = self.add_snapshot(snapshot)
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = snaphash
return snaphash
def gen_default_snapshot(self):
# generate the snapshot
sim_start_hash = self.gen_start_snapshot(self.get_default_init_walkers())
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = sim_start_hash
return sim_start_hash
def get_default_sim_apparatus(self):
return self.deserialize(self.metadata_kv['default_sim_apparatus'])
def get_default_init_walkers(self):
return self.deserialize(self.metadata_kv['default_init_walkers'])
def get_default_configuration(self):
config_hash = self.metadata_kv['default_configuration_hash']
return self.get_configuration(config_hash)
def get_default_configuration_hash(self):
return self.metadata_kv['default_configuration_hash']
def get_default_snapshot(self):
start_hash = self.metadata_kv['default_snapshot_hash']
return self.get_snapshot(start_hash)
def get_default_snapshot_hash(self):
return self.metadata_kv['default_snapshot_hash']
@classmethod
def hash_snapshot(cls, serial_str):
"""
Parameters
----------
serial_str :
Returns
-------
"""
return md5(serial_str).hexdigest()
def get_snapshot(self, snapshot_hash):
"""Returns a copy of a snapshot.
Parameters
----------
snapshot_hash :
Returns
-------
"""
return self.deserialize(self.snapshot_kv[snapshot_hash])
def get_configuration(self, config_hash):
"""Returns a copy of a snapshot.
Parameters
----------
config_hash :
Returns
-------
"""
return self.deserialize(self.configuration_kv[config_hash])
@property
def snapshot_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.snapshot_kv.keys())
@property
def configuration_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.configuration_kv.keys())
def add_snapshot(self, snapshot):
"""
Parameters
----------
snapshot :
Returns
-------
"""
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# check that the hash is not already in the snapshots
        if snaphash in self.snapshot_hashes:
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serialized_snapshot
return snaphash
def add_serial_snapshot(self, serial_snapshot):
# get the hash of the snapshot
snaphash = self.hash_snapshot(serial_snapshot)
# check that the hash is not already in the snapshots
        if snaphash in self.snapshot_hashes:
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serial_snapshot
return snaphash
def gen_start_snapshot(self, init_walkers):
"""
Parameters
----------
init_walkers :
Returns
-------
"""
# make a SimSnapshot object using the initial walkers and
start_snapshot = SimSnapshot(init_walkers, self.get_default_sim_apparatus())
# save the snapshot, and generate its hash
sim_start_md5 = self.add_snapshot(start_snapshot)
return sim_start_md5
@property
def default_snapshot_hash(self):
""" """
return self.metadata_kv['default_snapshot_hash']
@property
def default_snapshot(self):
""" """
return self.get_snapshot(self.default_snapshot_hash)
def snapshot_registered(self, snapshot):
"""Check whether a snapshot is already in the database, based on the
hash of it.
This serializes the snapshot so may be slow.
Parameters
----------
snapshot : SimSnapshot object
The snapshot object you want to query for.
Returns
-------
"""
# serialize and hash the snapshot
snaphash = self.hash_snapshot(self.serialize(snapshot))
# then check it
return self.snapshot_hash_registered(snaphash)
def snapshot_hash_registered(self, snapshot_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
        return snapshot_hash in self.snapshot_hashes
def configuration_hash_registered(self, config_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
        return config_hash in self.configuration_hashes
### run methods
def add_configuration(self, configuration):
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# check that the hash is not already in the snapshots
        if config_hash in self.configuration_hashes:
# just skip the rest of the function and return the hash
return config_hash
# save the snapshot in the KV store
self.configuration_kv[config_hash] = serialized_config
return config_hash
def add_serial_configuration(self, serial_configuration):
# get the hash of the configuration
snaphash = self.hash_snapshot(serial_configuration)
# check that the hash is not already in the configurations
        if snaphash in self.configuration_hashes:
# just skip the rest of the function and return the hash
return snaphash
# save the configuration in the KV store
self.configuration_kv[snaphash] = serial_configuration
return snaphash
@property
def create_run_table_query(self):
create_run_table_query = """
CREATE TABLE IF NOT EXISTS runs
(start_hash TEXT NOT NULL,
end_hash TEXT NOT NULL,
        config_hash TEXT NOT NULL,
last_cycle_idx INTEGER NOT NULL,
PRIMARY KEY (start_hash, end_hash))
"""
return create_run_table_query
@property
def add_run_record_query(self):
add_run_row_query = """
INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
VALUES (?, ?, ?, ?)
"""
return add_run_row_query
@property
def update_run_record_query(self):
q = """
UPDATE runs
SET config_hash = ?,
last_cycle_idx = ?
WHERE start_hash=? AND end_hash=?
"""
return q
@property
def delete_run_record_query(self):
q = """
DELETE FROM runs
WHERE start_hash=? AND end_hash=?
"""
return q
def _add_run_record(self, start_hash, end_hash, configuration_hash, cycle_idx):
params = (start_hash, end_hash, configuration_hash, cycle_idx)
# do it as a transaction
c = self._db.cursor()
# run the insert
c.execute(self.add_run_record_query, params)
def _delete_run_record(self, start_hash, end_hash):
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(self.delete_run_record_query, params)
def _update_run_record(self, start_hash, end_hash, new_config_hash, new_last_cycle_idx):
params = (new_config_hash, new_last_cycle_idx, start_hash, end_hash)
# do it as a transaction
c = self._db.cursor()
# run the update
c.execute(self.update_run_record_query, params)
def register_run(self, start_hash, end_hash, config_hash, cycle_idx):
"""
Parameters
----------
start_hash :
end_hash :
config_hash :
cycle_idx : int
The cycle of the simulation run the checkpoint was generated for.
Returns
-------
"""
# check that the hashes are for snapshots in the orchestrator
# if one is not registered raise an error
if not self.snapshot_hash_registered(start_hash):
raise OrchestratorError(
"snapshot start_hash {} is not registered with the orchestrator".format(
start_hash))
if not self.snapshot_hash_registered(end_hash):
raise OrchestratorError(
"snapshot end_hash {} is not registered with the orchestrator".format(
end_hash))
if not self.configuration_hash_registered(config_hash):
raise OrchestratorError(
"config hash {} is not registered with the orchestrator".format(
config_hash))
# add the record for this run to the runs table
self._add_run_record(start_hash, end_hash, config_hash, cycle_idx)
def get_run_records(self):
get_run_record_query = """
SELECT *
FROM runs
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
cursor = self._db.cursor()
cursor.execute(get_run_record_query)
records = cursor.fetchall()
return records
def get_run_record(self, start_hash, end_hash):
get_run_record_query = """
SELECT {fields}
FROM runs
WHERE start_hash=? AND end_hash=?
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(get_run_record_query, params)
record = cursor.fetchone()
return record
def run_last_cycle_idx(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
last_cycle_idx = record[self.RUN_SELECT_FIELDS.index('last_cycle_idx')]
return last_cycle_idx
def run_configuration(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
# get the configuration object and deserialize it
return self.deserialize(self.configuration_kv[config_hash])
def run_configuration_hash(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
return config_hash
def run_hashes(self):
return [(rec[0], rec[1]) for rec in self.get_run_records()]
def run_continues(self, start_hash, end_hash):
"""Given a start hash and end hash for a run, find the run that this
continues.
Parameters
----------
start_hash :
Start hash of the run in question.
end_hash :
End hash of the run in question.
Returns
-------
run_id : tuple of str, or None
The (start_hash, end_hash) of the run this run continues, or None
if no such run is registered.
"""
# loop through the runs in this orchestrator until we find one
# whose end hash matches the start hash of the queried run
for run_start_hash, run_end_hash in self.run_hashes():
if start_hash == run_end_hash:
return (run_start_hash, run_end_hash)
# no run matched, so this run does not continue another one
return None
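# A minimal usage sketch (hash values are hypothetical): run_continues
# can be chained to walk a run lineage backwards until the first run.
#
# lineage = [(start_hash, end_hash)]
# prev = orch.run_continues(start_hash, end_hash)
# while prev is not None:
#     lineage.append(prev)
#     prev = orch.run_continues(*prev)
#
# 'lineage' then lists the runs from the most recent back to the first.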
def _init_checkpoint_db(self, start_hash, configuration, checkpoint_dir, mode='x'):
logging.debug("Initializing checkpoint orch database")
# make the checkpoint with the default filename at the checkpoint directory
checkpoint_path = osp.join(checkpoint_dir, self.DEFAULT_CHECKPOINT_FILENAME)
# create a new database in the mode specified
logging.debug("Creating checkpoint database")
checkpoint_orch = Orchestrator(checkpoint_path, mode=mode)
# add the starting snapshot, bypassing the serialization stuff
logging.debug("Setting the starting snapshot")
checkpoint_orch.snapshot_kv[start_hash] = self.snapshot_kv[start_hash]
# if we have a new configuration at runtime serialize and
# hash it
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# save the configuration as well
checkpoint_orch.configuration_kv[config_hash] = serialized_config
checkpoint_orch.close()
logging.debug("closing connection to checkpoint database")
return checkpoint_path, config_hash
def _save_checkpoint(self, checkpoint_snapshot, config_hash,
checkpoint_db_path, cycle_idx,
):
"""
Parameters
----------
checkpoint_snapshot :
config_hash :
checkpoint_db_path :
cycle_idx :
Returns
-------
"""
# orchestrator wrapper to the db
logging.debug("Opening the checkpoint orch database")
checkpoint_orch = Orchestrator(checkpoint_db_path, mode='r+')
# connection to the db
cursor = checkpoint_orch._db.cursor()
# we replicate the code for adding the snapshot here because
# we want it to occur transactionally the delete and add
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(checkpoint_snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# the queries for deleting and inserting the new run record
delete_query = """
DELETE FROM runs
WHERE start_hash=?
AND end_hash=?
"""
insert_query = """
INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
VALUES (?, ?, ?, ?)
"""
# if there are any runs in the checkpoint orch remove the
# final snapshot
delete_params = None
if len(checkpoint_orch.run_hashes()) > 0:
start_hash, old_checkpoint_hash = checkpoint_orch.run_hashes()[0]
delete_params = (start_hash, old_checkpoint_hash)
else:
start_hash = list(checkpoint_orch.snapshot_kv.keys())[0]
# the config should already be in the orchestrator db
insert_params = (start_hash, snaphash, config_hash, cycle_idx)
# start this whole process as a transaction so we don't get
# something weird in between
logging.debug("Starting transaction for updating run table in checkpoint")
cursor.execute("BEGIN TRANSACTION")
# add the new one, using a special method for setting inside
# of a transaction
logging.debug("setting the new checkpoint snapshot into the KV")
cursor = checkpoint_orch.snapshot_kv.set_in_tx(cursor, snaphash, serialized_snapshot)
logging.debug("finished")
# if we need to delete the old end of the run snapshot and the
# run record for it
if delete_params is not None:
logging.debug("Old run record needs to be removed")
# remove the old run from the run table
logging.debug("Deleting the old run record")
cursor.execute(delete_query, delete_params)
logging.debug("finished")
# register the new run in the run table
logging.debug("Inserting the new run record")
cursor.execute(insert_query, insert_params)
logging.debug("finished")
# end the transaction
logging.debug("Finishing transaction")
cursor.execute("COMMIT")
logging.debug("Transaction committed")
# we do the removal of the old snapshot outside of the
# transaction since it is slow and can cause timeouts to
# occur. Furthermore, it is okay if it is in the checkpoint as
# the run record is what matters as long as the new checkpoint
# is there.
# delete the old snapshot if we need to
if delete_params is not None:
# WARN: occasionally and for unknown reasons we have found
# that the final checkpoint hash is the same as the one
# before. (The case where the last snapshot is on the same
# cycle as a backup is already covered). So as a last
# resort, we check that they don't have the same hash. If
# they do we don't delete it!
if snaphash != old_checkpoint_hash:
logging.debug("Deleting the old snapshot")
del checkpoint_orch.snapshot_kv[old_checkpoint_hash]
logging.debug("finished")
else:
logging.warning("Final snapshot has the same hash as the previous checkpoint. Not deleting the previous one.")
checkpoint_orch.close()
logging.debug("closed the checkpoint orch connection")
@staticmethod
def gen_sim_manager(start_snapshot, configuration):
"""
Parameters
----------
start_snapshot :
configuration :
Returns
-------
"""
# construct the sim manager, in a wepy specific way
sim_manager = Manager(start_snapshot.walkers,
runner=start_snapshot.apparatus.filters[0],
boundary_conditions=start_snapshot.apparatus.filters[1],
resampler=start_snapshot.apparatus.filters[2],
# configuration options
work_mapper=configuration.work_mapper,
reporters=configuration.reporters,
sim_monitor=configuration.monitor,
)
return sim_manager
def run_snapshot_by_time(self, start_hash, run_time, n_steps,
checkpoint_freq=None,
checkpoint_dir=None,
configuration=None,
configuration_hash=None,
checkpoint_mode='x'):
"""For a finished run continue it but resetting all the state of the
resampler and boundary conditions
Parameters
----------
start_hash :
run_time :
n_steps :
checkpoint_freq :
(Default value = None)
checkpoint_dir :
(Default value = None)
configuration :
(Default value = None)
configuration_hash :
(Default value = None)
checkpoint_mode :
(Default value = 'x')
Returns
-------
"""
# you must have a checkpoint dir if you ask for a checkpoint
# frequency
if checkpoint_freq is not None and checkpoint_dir is None:
raise ValueError("Must provide a directory for the checkpoint file "
"is a frequency is specified")
if configuration_hash is not None and configuration is not None:
raise ValueError("Cannot specify both a hash of an existing configuration"
"and provide a runtime configuration")
# if no configuration was specified we use the default one
elif (configuration is None) and (configuration_hash is None):
configuration = self.get_default_configuration()
# if a configuration hash was given only then we retrieve that
# configuration since we must pass configurations to the
# checkpoint DB initialization
elif configuration_hash is not None:
configuration = self.configuration_kv[configuration_hash]
# check that the directory for checkpoints exists, and create
# it if it doesn't and isn't already created
if checkpoint_dir is not None:
checkpoint_dir = osp.realpath(checkpoint_dir)
os.makedirs(checkpoint_dir, exist_ok=True)
# if the checkpoint dir is not specified don't create a
# checkpoint db orch
checkpoint_db_path = None
if checkpoint_dir is not None:
logging.debug("Initialization of checkpoint database is requested")
checkpoint_db_path, configuration_hash = self._init_checkpoint_db(start_hash,
configuration,
checkpoint_dir,
mode=checkpoint_mode)
logging.debug("finished initializing checkpoint database")
# get the snapshot and the configuration to use for the sim_manager
start_snapshot = self.get_snapshot(start_hash)
# generate the simulation manager given the snapshot and the
# configuration
sim_manager = self.gen_sim_manager(start_snapshot, configuration)
# handle and process the optional arguments for running simulation
if 'runner' in configuration.apparatus_opts:
runner_opts = configuration.apparatus_opts['runner']
else:
runner_opts = None
# run the init subroutine for the simulation manager
logging.debug("Running sim_manager.init")
sim_manager.init()
# run each cycle manually creating checkpoints when necessary
logging.debug("Starting run loop")
walkers = sim_manager.init_walkers
cycle_idx = 0
start_time = time.time()
while time.time() - start_time < run_time:
logging.debug("Running cycle {}".format(cycle_idx))
# run the cycle
walkers, filters = sim_manager.run_cycle(
walkers,
n_steps,
cycle_idx,
runner_opts=runner_opts,
)
# check to see if a checkpoint is necessary
if (checkpoint_freq is not None):
if (cycle_idx % checkpoint_freq == 0):
logging.debug("Checkpoint is required for this cycle")
# make the checkpoint snapshot
logging.debug("Generating the simulation snapshot")
checkpoint_snapshot = SimSnapshot(walkers, SimApparatus(filters))
# save the checkpoint (however that is implemented)
logging.debug("saving the checkpoint to the database")
self._save_checkpoint(checkpoint_snapshot,
configuration_hash,
checkpoint_db_path,
cycle_idx)
logging.debug("finished saving the checkpoint to the database")
# increase the cycle index for the next cycle
cycle_idx += 1
logging.debug("Finished the run cycle")
# the cycle index was set for the next cycle which didn't run
# so we decrement it
last_cycle_idx = cycle_idx - 1
logging.debug("Running sim_manager.cleanup")
# run the cleanup subroutine
sim_manager.cleanup()
# run the segment given the sim manager and run parameters
end_snapshot = SimSnapshot(walkers, SimApparatus(filters))
logging.debug("Run finished")
# return the things necessary for saving to the checkpoint if
# that is what is wanted later on
return end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx
def orchestrate_snapshot_run_by_time(self, snapshot_hash, run_time, n_steps,
checkpoint_freq=None,
checkpoint_dir=None,
orchestrator_path=None,
configuration=None,
# these can reparametrize the paths
# for both the orchestrator produced
# files as well as the configuration
work_dir=None,
config_name=None,
narration=None,
mode=None,
# extra kwargs will be passed to the
# configuration.reparametrize method
**kwargs):
"""
Parameters
----------
snapshot_hash :
run_time :
n_steps :
checkpoint_freq :
(Default value = None)
checkpoint_dir :
(Default value = None)
orchestrator_path :
(Default value = None)
configuration :
(Default value = None)
work_dir :
(Default value = None)
config_name :
(Default value = None)
narration :
(Default value = None)
mode :
(Default value = None)
**kwargs :
Extra keyword arguments passed to the configuration.reparametrize method.
Returns
-------
"""
# the checkpoint orchestrator database is always opened in the
# default orchestration mode, since pickling the orchestrator
# requires a bytes mode; the `mode` argument only reparametrizes
# the configuration below
orch_mode = self.DEFAULT_ORCHESTRATION_MODE
# there are two possible uses for the path reparametrizations:
# the configuration and the orchestrator file paths. If both
# of those are explicitly specified by passing in the whole
# configuration object or both of checkpoint_dir,
# orchestrator_path then those reparametrization kwargs will
# not be used. As this is likely not the intention of the user
# we will raise an error. If there is even one use for them no
# error will be raised.
# first check if any reparametrizations were even requested
parametrizations_requested = (work_dir is not None,
config_name is not None,
narration is not None,
mode is not None,)
# check if there are any available targets for reparametrization
reparametrization_targets = (configuration is None,
checkpoint_dir is None,
orchestrator_path is None)
# if paramatrizations were requested and there are no targets
# we need to raise an error
if any(parametrizations_requested) and not any(reparametrization_targets):
raise OrchestratorError("Reparametrizations were requested but none are possible,"
" due to all possible targets being already explicitly given")
# if any paths were not given and no defaults for path
# parameters we want to fill in the defaults for them. This
# will also fill in any missing parametrizations with defaults
# we do this by just setting the path parameters if they
# aren't set, then later the parametrization targets will be
# tested for if they have been set or not, and if they haven't
# then these will be used to generate paths for them.
if work_dir is None:
work_dir = self.DEFAULT_WORKDIR
if config_name is None:
config_name = self.DEFAULT_CONFIG_NAME
if narration is None:
narration = self.DEFAULT_NARRATION
if mode is None:
mode = self.DEFAULT_MODE
# if no configuration was specified use the default one
if configuration is None:
configuration = self.get_default_configuration()
# reparametrize the configuration with the given path
# parameters and anything else in kwargs. If they are none
# this will have no effect anyhow
logging.debug("Reparametrizing the configuration")
configuration = configuration.reparametrize(work_dir=work_dir,
config_name=config_name,
narration=narration,
mode=mode,
**kwargs)
# make parametric paths for the checkpoint directory and the
# orchestrator pickle to be made, unless they are explicitly given
if checkpoint_dir is None:
# the checkpoint directory will be in the work dir
logging.debug("checkpoint directory defaulted to the work_dir")
checkpoint_dir = work_dir
logging.debug("In the orchestrate run, calling to run_snapshot by time")
# then actually run the simulation with checkpointing. This
# returns the end snapshot and doesn't write out anything to
# orchestrators other than the checkpointing
(end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx) =\
self.run_snapshot_by_time(snapshot_hash, run_time, n_steps,
checkpoint_freq=checkpoint_freq,
checkpoint_dir=checkpoint_dir,
configuration=configuration,
checkpoint_mode=orch_mode)
logging.debug("Finished running snapshot by time")
# if the last cycle in the run was a checkpoint skip this step
# of saving a checkpoint
do_final_checkpoint = True
# make sure the checkpoint_freq is defined before testing it
if checkpoint_freq is not None:
if last_cycle_idx % checkpoint_freq == 0:
logging.debug("Last cycle saved a checkpoint, no need to save one")
do_final_checkpoint = False
if do_final_checkpoint:
logging.debug("Saving a final checkpoint for the end of the run")
# now that it is finished we save the final snapshot to the
# checkpoint file. This is done transactionally using the
# SQLite transaction functionality (either succeeds or doesn't
# happen) that way we don't have worry about data integrity
# loss. Here we also don't have to worry about other processes
# interacting with the checkpoint which makes it isolated.
self._save_checkpoint(end_snapshot, configuration_hash,
checkpoint_db_path, last_cycle_idx)
logging.debug("Finished saving the final checkpoint for the run")
# then return the final orchestrator
logging.debug("Getting a connection to that orch to retun")
checkpoint_orch = Orchestrator(checkpoint_db_path,
mode='r+',
append_only=True)
return checkpoint_orch
def reconcile_orchestrators(host_path, *orchestrator_paths):
"""
Parameters
----------
host_path :
Path to the host orchestrator database that receives the merged records.
*orchestrator_paths :
Paths of the orchestrator databases to merge into the host.
Returns
-------
"""
if not osp.exists(host_path):
assert len(orchestrator_paths) > 1, \
"If the host path is a new orchestrator, must give at least 2 orchestrators to merge."
# open the host orchestrator at the location which will have all
# of the new things put into it from the other orchestrators. If
# it doesn't already exist it will be created otherwise open
# read-write.
new_orch = Orchestrator(orch_path=host_path,
mode='a',
append_only=True)
# TODO deprecate, if there is no defaults we can't set them since
# the mode is append only, we don't really care about these so
# don't set them, otherwise do some mode logic to figure this out
# and open in write mode and set defaults, then change to append
# only
# # if this is an existing orchestrator copy the default
# # sim_apparatus and init_walkers
# try:
# default_app = new_orch.get_default_sim_apparatus()
# except KeyError:
# # no default apparatus, that is okay
# pass
# else:
# # set it
# new_orch.set_default_sim_apparatus(default_app)
# # same for the initial walkers
# try:
# default_walkers = new_orch.get_default_init_walkers()
# except KeyError:
# # no default apparatus, that is okay
# pass
# else:
# # set it
# new_orch.set_default_sim_apparatus(default_walkers)
for orch_path in orchestrator_paths:
# open it in read-write fail if doesn't exist
orch = Orchestrator(orch_path=orch_path,
mode='r+',
append_only=True)
# add in all snapshots from each orchestrator, by the hash not the
# snapshots themselves, we trust they are correct
for snaphash in orch.snapshot_hashes:
# check that the hash is not already in the snapshots
if snaphash in new_orch.snapshot_hashes:
# skip it and move on
continue
# if it is not copy it over without deserializing
new_orch.snapshot_kv[snaphash] = orch.snapshot_kv[snaphash]
# add in the configurations for the runs from each
# orchestrator, by the hash not the snapshots themselves, we
# trust they are correct
for run_id in orch.run_hashes():
config_hash = orch.run_configuration_hash(*run_id)
# check that the hash is not already in the configurations
if config_hash in new_orch.configuration_hashes:
# skip it and move on
continue
# if it is not set it
new_orch.configuration_kv[config_hash] = orch.configuration_kv[config_hash]
# concatenate the run table with an SQL union from an attached
# database
attached_table_name = "other"
# query to attach the foreign database
attach_query = """
ATTACH '{}' AS {}
""".format(orch_path, attached_table_name)
# query to update the runs table with new unique runs
union_query = """
INSERT INTO runs
SELECT * FROM (
SELECT * FROM {}.runs
EXCEPT
SELECT * FROM runs
)
""".format(attached_table_name)
# query to detach the table
detach_query = """
DETACH {}
""".format(attached_table_name)
# then run the queries
cursor = new_orch._db.cursor()
try:
cursor.execute('BEGIN TRANSACTION')
cursor.execute(attach_query)
cursor.execute(union_query)
cursor.execute('COMMIT')
cursor.execute(detach_query)
except Exception:
# roll back the partial merge and re-raise the error rather
# than swallowing it
cursor.execute('ROLLBACK')
raise
return new_orch
|
5229
|
import logging
from typing import Match, Any, Dict
import aiohttp
from discord import Message
from MoMMI import comm_event, command, MChannel, always_command
logger = logging.getLogger(__name__)
@comm_event("ss14")
async def ss14_nudge(channel: MChannel, message: Any, meta: str) -> None:
try:
config: Dict[str, Any] = channel.module_config(f"ss14.servers.{meta}")
except ValueError:
return
expect_password = config["password"]
if expect_password != message.get("password"):
return
if "type" not in message or "contents" not in message:
return
contents = message["contents"]
type = message["type"]
if type == "ooc":
final_message = f"\u200B**OOC**: `{contents['sender']}`: {contents['contents']}"
else:
return
await channel.send(final_message)
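# For reference, a payload that passes the checks in ss14_nudge looks
# roughly like this (a sketch inferred from the handler above; values
# are hypothetical):
#
# {
#     "password": "<password from the ss14.servers.<meta> module config>",
#     "type": "ooc",
#     "contents": {"sender": "PlayerName", "contents": "hello from in-game"}
# }
#
# Any other "type" value is silently ignored.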
@always_command("ss14_relay", unsafe=True)
async def ss14_relay(channel: MChannel, match: Match, message: Message) -> None:
if not channel.internal_name:
return
content = message.content
content = content.strip()
if not content or content[0] == "\u200B":
return
server = None
config: Any
for config in channel.server_config("modules.ss14", []):
if config["discord_channel"] != channel.internal_name:
continue
server = config["server"]
if not server:
return
config = channel.module_config(f"ss14.servers.{server}")
password = config["password"]
url = config["api_url"] + "/ooc"
async with aiohttp.ClientSession() as session:
async with session.post(url, json={"password": password, "sender": message.author.name, "contents": content}) as resp:
r = await resp.text()
logger.error(f"SS14 OOC relay returned HTTP {resp.status}: {r}")
|
5239
|
import array
import struct
import time
from fcntl import ioctl
from typing import IO
from platypush.backend import Backend
from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, \
JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent
class JoystickLinuxBackend(Backend):
"""
This backend intercepts events from joystick devices through the native Linux API implementation.
It is loosely based on https://gist.github.com/rdb/8864666, which itself uses the
`Linux kernel joystick API <https://www.kernel.org/doc/Documentation/input/joystick-api.txt>`_ to interact with
the devices.
Triggers:
* :class:`platypush.message.event.joystick.JoystickConnectedEvent` when the joystick is connected.
* :class:`platypush.message.event.joystick.JoystickDisconnectedEvent` when the joystick is disconnected.
* :class:`platypush.message.event.joystick.JoystickButtonPressedEvent` when a joystick button is pressed.
* :class:`platypush.message.event.joystick.JoystickButtonReleasedEvent` when a joystick button is released.
* :class:`platypush.message.event.joystick.JoystickAxisEvent` when an axis value of the joystick changes.
"""
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'throttle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'trigger',
0x121: 'thumb',
0x122: 'thumb2',
0x123: 'top',
0x124: 'top2',
0x125: 'pinkie',
0x126: 'base',
0x127: 'base2',
0x128: 'base3',
0x129: 'base4',
0x12a: 'base5',
0x12b: 'base6',
0x12f: 'dead',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
0x13a: 'select',
0x13b: 'start',
0x13c: 'mode',
0x13d: 'thumbl',
0x13e: 'thumbr',
0x220: 'dpad_up',
0x221: 'dpad_down',
0x222: 'dpad_left',
0x223: 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0: 'dpad_left',
0x2c1: 'dpad_right',
0x2c2: 'dpad_up',
0x2c3: 'dpad_down',
}
def __init__(self, device: str = '/dev/input/js0', *args, **kwargs):
"""
:param device: Joystick device to monitor (default: ``/dev/input/js0``).
"""
super().__init__(*args, **kwargs)
self.device = device
self._axis_states = {}
self._button_states = {}
self._axis_map = []
self._button_map = []
def _init_joystick(self, dev: IO):
# Get the device name.
buf = array.array('B', [0] * 64)
ioctl(dev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(dev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(dev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(dev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self._axis_map.append(axis_name)
self._axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(dev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self._button_map.append(btn_name)
self._button_states[btn_name] = 0
self.bus.post(JoystickConnectedEvent(device=self.device, name=js_name, axes=self._axis_map,
buttons=self._button_map))
def run(self):
super().run()
self.logger.info(f'Opening {self.device}...')
while not self.should_stop():
# Open the joystick device.
try:
jsdev = open(self.device, 'rb')
self._init_joystick(jsdev)
except Exception as e:
self.logger.debug(f'Joystick device on {self.device} not available: {e}')
time.sleep(5)
continue
# Joystick event loop
while not self.should_stop():
try:
evbuf = jsdev.read(8)
if evbuf:
_, value, evt_type, number = struct.unpack('IhBB', evbuf)
if evt_type & 0x80: # Initial state notification
continue
if evt_type & 0x01:
button = self._button_map[number]
if button:
self._button_states[button] = value
evt_class = JoystickButtonPressedEvent if value else JoystickButtonReleasedEvent
# noinspection PyTypeChecker
self.bus.post(evt_class(device=self.device, button=button))
if evt_type & 0x02:
axis = self._axis_map[number]
if axis:
fvalue = value / 32767.0
self._axis_states[axis] = fvalue
# noinspection PyTypeChecker
self.bus.post(JoystickAxisEvent(device=self.device, axis=axis, value=fvalue))
except OSError as e:
self.logger.warning(f'Connection to {self.device} lost: {e}')
self.bus.post(JoystickDisconnectedEvent(device=self.device))
break
|
5258
|
import idna
class AddressMismatch(ValueError):
'''
In order to set up reverse resolution correctly, the ENS name should first
point to the address. This exception is raised if the name does
not currently point to the address.
'''
pass
class InvalidName(idna.IDNAError):
'''
This exception is raised if the provided name does not meet
the syntax standards specified in `EIP 137 name syntax
<https://github.com/ethereum/EIPs/blob/master/EIPS/eip-137.md#name-syntax>`_.
For example: names may not start with a dot, or include a space.
'''
pass
class UnauthorizedError(Exception):
'''
Raised if the sending account is not the owner of the name
you are trying to modify. Make sure to set ``from`` in the
``transact`` keyword argument to the owner of the name.
'''
pass
class UnownedName(Exception):
'''
Raised if you are trying to modify a name that no one owns.
If working on a subdomain, make sure the subdomain gets created
first with :meth:`~ens.main.ENS.setup_address`.
'''
pass
class BidTooLow(ValueError):
'''
Raised if you bid less than the minimum amount
'''
pass
class InvalidBidHash(ValueError):
'''
Raised if you supply incorrect data to generate the bid hash.
'''
pass
class InvalidLabel(ValueError):
'''
Raised if you supply an invalid label
'''
pass
class OversizeTransaction(ValueError):
'''
Raised if a transaction you are trying to create would cost so
much gas that it could not fit in a block.
For example: when you try to start too many auctions at once.
'''
pass
class UnderfundedBid(ValueError):
'''
Raised if you send less wei with your bid than you declared
as your intent to bid.
'''
pass
|
5261
|
from typing import List
from typing import Optional
from typing import Union
from models.vps import VpsStatus
from schemas.base import APIModel
from schemas.base import BasePagination
from schemas.base import BaseSchema
from schemas.base import BaseSuccessfulResponseModel
class VpsSshKeySchema(APIModel):
name: str
public_key: str = None
private_key: str = None
isp_id: int
ssh_key_id: Optional[str]
date_created: Optional[str]
fingerprint: Optional[str]
class VpsSpecPlanSchema(APIModel):
name: str
plan_code: Union[str, int]
region_codes: List = None
bandwidth: float
ram: int
vcpu: int
disk: int
price_monthly: Union[float, int, str] = None
price_hourly: Union[float, int, str] = None
price_yearly: Union[float, int, str] = None
class VpsSpecRegionSchema(APIModel):
name: str
region_code: Union[str, int]
features: List[str] = None
plan_codes: List[Union[str, int]] = []
class VpsSpecOsSchema(APIModel):
name: str
os_code: Union[str, int]
region_codes: List[Union[str, int]] = []
plan_codes: List[Union[str, int]] = []
class VpsSpecSchema(APIModel):
region: List[VpsSpecRegionSchema] = []
plan: List[VpsSpecPlanSchema] = []
os: List[VpsSpecOsSchema] = []
class VpsSpecResponse(BaseSuccessfulResponseModel):
result: VpsSpecSchema
class VpsCreateSchema(APIModel):
hostname: str
isp_id: int
region_code: str
os_code: str
plan_code: str
ssh_keys: List[str] = []
status: int = VpsStatus.init
remark: str = None
class VpsItemSchema(BaseSchema):
isp_id: int
ip: Union[int, str, None]
server_id: Optional[str]
hostname: str
os: Optional[str]
plan: Optional[str]
region: Optional[str]
status: int
status_name: str
status_msg: Optional[str]
isp_provider_name: str
class VpsItemResponse(BaseSuccessfulResponseModel):
result: VpsItemSchema
class VpsPaginationSchema(BasePagination):
items: Optional[List[VpsItemSchema]]
class VpsPaginationResponse(BaseSuccessfulResponseModel):
result: VpsPaginationSchema
class VpsSshKeyResponseSchema(BaseSuccessfulResponseModel):
result: List[VpsSshKeySchema]
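# Minimal construction sketch (hypothetical values; assumes APIModel
# validates like a pydantic model on instantiation):
#
# vps = VpsCreateSchema(
#     hostname="demo-host",
#     isp_id=1,
#     region_code="ewr",
#     os_code="ubuntu-20.04",
#     plan_code="vc2-1c-1gb",
#     ssh_keys=["demo-key"],
# )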
|
5266
|
from django.urls import reverse
from consents.models import Consent, Term
from workshops.models import KnowledgeDomain, Person, Qualification
from workshops.tests.base import TestBase
class TestAutoUpdateProfile(TestBase):
def setUp(self):
self._setUpAirports()
self._setUpLessons()
self._setUpLanguages()
self.user = Person.objects.create_user(
username="user",
personal="",
family="",
email="<EMAIL>",
password="<PASSWORD>",
)
self.person_consent_required_terms(self.user)
Qualification.objects.create(person=self.user, lesson=self.git)
Qualification.objects.create(person=self.user, lesson=self.sql)
self.physics = KnowledgeDomain.objects.create(name="physics")
self.chemistry = KnowledgeDomain.objects.create(name="chemistry")
self.user.domains.add(self.physics)
self.user.languages.add(self.english)
self.user.languages.add(self.french)
self.client.login(username="user", password="<PASSWORD>")
def test_load_form(self):
rv = self.client.get(reverse("autoupdate_profile"))
self.assertEqual(rv.status_code, 200)
def test_update_profile(self):
term_slugs = [
"may-contact",
"may-publish-name",
"public-profile",
]
terms_by_term_slug = {
term.slug: term
for term in Term.objects.filter(slug__in=term_slugs)
.active()
.prefetch_active_options()
}
consent_data = {
f"consents-{slug}": terms_by_term_slug[slug].active_options[0].pk
for slug in term_slugs
}
data = {
"personal": "admin",
"middle": "",
"family": "Smith",
"email": "<EMAIL>",
"gender": Person.UNDISCLOSED,
"airport": self.airport_0_0.pk,
"github": "changed",
"twitter": "",
"url": "",
"username": "changed",
"affiliation": "",
"languages": [self.latin.pk, self.french.pk],
"domains": [self.chemistry.pk],
"lessons": [self.git.pk, self.matlab.pk],
"consents-person": self.user.pk,
**consent_data,
}
rv = self.client.post(reverse("autoupdate_profile"), data, follow=True)
self.assertEqual(rv.status_code, 200)
content = rv.content.decode("utf-8")
self.assertNotIn("Fix errors below", content)
self.user.refresh_from_db()
self.assertEqual(self.user.username, "user") # username is read-only
self.assertEqual(self.user.github, None) # github is read-only
self.assertEqual(self.user.family, "Smith")
self.assertEqual(set(self.user.lessons.all()), {self.git, self.matlab})
self.assertEqual(list(self.user.domains.all()), [self.chemistry])
self.assertEqual(set(self.user.languages.all()), {self.french, self.latin})
updated_consents_by_term_slug = {
consent.term.slug: consent
for consent in Consent.objects.filter(
term__slug__in=term_slugs, person=self.user
)
.active()
.select_related("term")
}
for slug in term_slugs:
self.assertEqual(
updated_consents_by_term_slug[slug].term_option.pk,
consent_data[f"consents-{slug}"],
)
|
5329
|
import libcst as cst
import libcst.matchers as m
from fixit import CstLintRule
from fixit import InvalidTestCase as Invalid
from fixit import ValidTestCase as Valid
class UseFstringRule(CstLintRule):
MESSAGE: str = (
"As mentioned in the [Contributing Guidelines]"
+ "(https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md), "
+ "please do not use printf style formatting or `str.format()`. "
+ "Use [f-string](https://realpython.com/python-f-strings/) instead to be "
+ "more readable and efficient."
)
VALID = [
Valid("assigned='string'; f'testing {assigned}'"),
Valid("'simple string'"),
Valid("'concatenated' + 'string'"),
Valid("b'bytes %s' % 'string'.encode('utf-8')"),
]
INVALID = [
Invalid("'hello, {name}'.format(name='you')"),
Invalid("'hello, %s' % 'you'"),
Invalid("r'raw string value=%s' % val"),
]
def visit_Call(self, node: cst.Call) -> None:
if m.matches(
node,
m.Call(
func=m.Attribute(value=m.SimpleString(), attr=m.Name(value="format"))
),
):
self.report(node)
def visit_BinaryOperation(self, node: cst.BinaryOperation) -> None:
if (
m.matches(
node, m.BinaryOperation(left=m.SimpleString(), operator=m.Modulo())
)
# SimpleString can be bytes and fstring don't support bytes.
# https://www.python.org/dev/peps/pep-0498/#no-binary-f-strings
and isinstance(
cst.ensure_type(node.left, cst.SimpleString).evaluated_value, str
)
):
self.report(node)
|
5355
|
from pathlib import Path
from typing import Dict
from errors.common.exception import DppError
class DppArgparseError(DppError):
pass
class DppArgparseTaxonomyNotFoundError(DppArgparseError):
def __init__(self, taxonomy_name: str):
super().__init__(f"taxonomy '{taxonomy_name}' does not exist")
self.taxonomy_name: str = taxonomy_name
class DppArgparseNotProjectDirectory(DppArgparseError):
def __init__(self, path: Path):
super().__init__(f"directory '{str(path)}' is not a defect taxonomy project")
self.path: Path = path
class DppArgparseDefectIndexError(DppArgparseError):
def __init__(self, index: int):
super().__init__(f"invalid index '{index}' of defects")
self.index: int = index
class DppArgparseFileNotFoundError(DppArgparseError, FileNotFoundError):
def __init__(self, path: str):
super().__init__()
self.path: str = path
class DppArgparseInvalidEnvironment(DppArgparseError):
def __init__(self, value: str):
super().__init__(
f"invalid environment variable format '{value}' (should be KEY=VALUE)"
)
self.value: str = value
class DppArgparseInvalidConfigError(DppArgparseError):
def __init__(self):
super().__init__()
class DppArgparseConfigCorruptedError(DppArgparseError):
def __init__(self, data: Dict):
super().__init__(f"config is corrupted: {data}")
self.data = data
class DppArgparseInvalidCaseExpressionError(DppArgparseError):
def __init__(self, index: int, name: str, cases: int, expr: str):
super().__init__(
f"Defect#{index} of {name} has {cases} test cases, but expression was: {expr}"
)
self.index: int = index
self.name: str = name
self.cases: int = cases
self.expr: str = expr
|
5371
|
import numpy as np
DEFAULT_FILE_PATH = "utils/datasets/glove.6B.50d.txt"
def loadWordVectors(tokens, filepath=DEFAULT_FILE_PATH, dimensions=50):
"""Read pretrained GloVe vectors"""
wordVectors = np.zeros((len(tokens), dimensions))
with open(filepath) as ifs:
for line in ifs:
line = line.strip()
if not line:
continue
row = line.split()
token = row[0]
if token not in tokens:
continue
data = [float(x) for x in row[1:]]
if len(data) != dimensions:
raise RuntimeError("wrong number of dimensions")
wordVectors[tokens[token]] = np.asarray(data)
return wordVectors
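# Minimal usage sketch (assumes the default GloVe file exists on disk and
# that `tokens` maps each word to its row index in the returned matrix):
#
# tokens = {"the": 0, "cat": 1, "sat": 2}
# vectors = loadWordVectors(tokens)      # shape (3, 50)
# cat_vector = vectors[tokens["cat"]]    # all-zero row if "cat" is not in the file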
|
5412
|
import copy
import json
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyJar
from oic.utils.keyio import KeyBundle
__author__ = 'roland'
import logging
logger = logging.getLogger(__name__)
class OIDCError(Exception):
pass
def flow2sequence(operations, item):
flow = operations.FLOWS[item]
return [operations.PHASES[phase] for phase in flow["sequence"]]
class OIDCTestSetup(object):
def __init__(self, client_cls, config, test_defs):
"""
:param config: Imported configuration module
:return:
"""
self.client_cls = client_cls
self.config = config
self.test_features = []
self.client = self.create_client(**config.CLIENT)
self.test_defs = test_defs
def create_client(self, **kwargs):
"""
Instantiate a _client instance
:param kwargs: Keyword arguments
Keys are ["srv_discovery_url", "client_info", "client_registration",
"provider_info", "keys"]
:return: _client instance
"""
_key_set = set(kwargs.keys())
args = {}
_client = self.client_cls(client_authn_method=CLIENT_AUTHN_METHOD,
behaviour=kwargs["behaviour"],
verify_ssl=self.config.VERIFY_SSL, **args)
# The behaviour parameter is not significant for the selection process
_key_set.discard("behaviour")
try:
setattr(_client, "allow", kwargs["allow"])
except KeyError:
pass
else:
_key_set.discard("allow")
try:
jwks = self.construct_jwks(_client, kwargs["keys"])
except KeyError:
pass
else:
# export JWKS
f = open("export/jwk.json", "w")
f.write(json.dumps(jwks))
f.close()
_client.jwks_uri = self.config.CLIENT["key_export_url"]
self.test_features = _key_set
try:
_client.client_prefs = copy.copy(kwargs["preferences"])
except KeyError:
pass
else:
_key_set.discard("preferences")
if "client_info" in _key_set:
_client.redirect_uris = self.config.CLIENT[
"client_info"]["redirect_uris"]
elif "client_registration" in _key_set:
reg_info = self.config.CLIENT["client_registration"]
_client.redirect_uris = reg_info["redirect_uris"]
_client.client_id = reg_info["client_id"]
_client.client_secret = reg_info["client_secret"]
return _client
@staticmethod
def construct_jwks(_client, key_conf):
"""
Construct the jwks
"""
if _client.keyjar is None:
_client.keyjar = KeyJar()
kbl = []
kid_template = "a%d"
kid = 0
for typ, info in key_conf.items():
kb = KeyBundle(source="file://%s" % info["key"], fileformat="der",
keytype=typ)
for k in kb.keys():
k.serialize()
k.kid = kid_template % kid
kid += 1
_client.kid[k.use][k.kty] = k.kid
_client.keyjar.add_kb("", kb)
kbl.append(kb)
jwks = {"keys": []}
for kb in kbl:
# ignore simple keys
jwks["keys"].extend([k.to_dict()
for k in kb.keys() if k.kty != 'oct'])
return jwks
def make_sequence(self, flow):
"""
Translate a flow name into a sequence of request/responses.
:param flow: Which test flow to use
:return: test sequence and test definitions
"""
sequence = flow2sequence(self.test_defs, flow)
res = {"sequence": sequence,
"tests": {"pre": [], "post": []},
"flow": [flow],
"block": [],
"mode": "",
"expect_exception": False}
_flow = self.test_defs.FLOWS[flow]
for param in ["tests", "block", "mode", "expect_exception"]:
try:
res[param] = _flow[param]
except KeyError:
pass
return res
def add_init(self, test_spec):
"""
Add _client registration and provider info gathering if necessary
:param test_spec:
:return:
"""
_seq = test_spec["sequence"]
_flow = test_spec["flow"]
if "client_info" in self.test_features and \
"registration" not in test_spec["block"]:
_register = True
# May not be the first item in the sequence
for sq in _seq:
try:
if sq[0].request == "RegistrationRequest":
_register = False
except TypeError:
pass
if _register:
_ext = self.test_defs.PHASES["oic-registration"]
_seq.insert(0, _ext)
_flow.insert(0, "oic-registration")
if "srv_discovery_url" in self.test_features:
op_spec = self.test_defs.PHASES["provider-discovery"]
if op_spec not in _seq:
_seq.insert(0, op_spec)
_flow.insert(0, "provider-discovery")
return test_spec
def request_and_return(conv, url, response=None, method="GET", body=None,
body_type="json", state="", http_args=None,
**kwargs):
"""
:param url: The URL to which the request should be sent
:param response: Response type
:param method: Which HTTP method to use
:param body: A message body if any
:param body_type: The format of the body of the return message
:param http_args: Arguments for the HTTP _client
:return: A cls or ErrorResponse instance or the HTTP response
instance if no response body was expected.
"""
if http_args is None:
http_args = {}
_cli = conv._client
_resp = _cli.http_request(url, method, data=body, **http_args)
conv.position = url
conv.last_response = _resp
conv.last_content = _resp.content
if not "keyjar" in kwargs:
kwargs["keyjar"] = conv.keyjar
_response = _cli.parse_request_response(_resp, response, body_type, state,
**kwargs)
conv.protocol_response.append((_response, _resp.content))
return _response
def test_summation(conv, sid):
status = 0
for item in conv.test_output:
if item["status"] > status:
status = item["status"]
if status == 0:
status = 1
info = {
"id": sid,
"status": status,
"tests": conv.test_output
}
return info
|
5433
|
from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""Test Option Class"""
def __init__(self):
super(TestOptions, self).__init__()
self.parser.add_argument('--load_checkpoint_path', required=True, type=str, help='checkpoint path')
self.parser.add_argument('--save_result_path', required=True, type=str, help='save result path')
self.parser.add_argument('--max_val_samples', default=None, type=int, help='max val data')
self.parser.add_argument('--batch_size', default=256, type=int, help='batch_size')
self.is_train = False
|
5578
|
import re
import setuptools
README_FILENAME = "README.md"
VERSION_FILENAME = "observed.py"
VERSION_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
# Get version information
with open(VERSION_FILENAME, "r") as version_file:
mo = re.search(VERSION_RE, version_file.read(), re.M)
if mo:
version = mo.group(1)
else:
msg = "Unable to find version string in %s." % (version_file,)
raise RuntimeError(msg)
# Get description information
with open(README_FILENAME, "r") as description_file:
long_description = description_file.read()
setuptools.setup(
name="observed",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Observer pattern for functions and bound methods",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DanielSank/observed",
py_modules=["observed"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
5579
|
import os
import json
import numpy as np
import pickle
from typing import Any
from pycocotools.coco import COCO
from torch.utils.data import Dataset
class DetectionMSCOCODataset(Dataset):
def __init__(self, annotation_file: str, image_dir: str):
self._annotation_file = annotation_file
self._image_dir = image_dir
self._cache_file = self._annotation_file + ".cache"
self._coco = COCO(self._annotation_file)
self._img_ids = self._coco.getImgIds()
self._cat_ids = self._coco.getCatIds()
self._ann_ids = self._coco.getAnnIds()
self._data = "coco"
self._classes = {
ind: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._load_data()
self._db_inds = np.arange(len(self._image_names))
self._load_coco_data()
def _load_data(self):
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
with open(self._cache_file, "wb") as f:
pickle.dump([self._detections, self._image_names], f)
print("Cache file created")
else:
with open(self._cache_file, "rb") as f:
self._detections, self._image_names = pickle.load(f)
def _load_coco_data(self):
with open(self._annotation_file, "r") as f:
data = json.load(f)
coco_ids = self._coco.getImgIds()
eval_ids = {
self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
for coco_id in coco_ids
}
self._coco_categories = data["categories"]
self._coco_eval_ids = eval_ids
def class_name(self, cid):
cat_id = self._classes[cid]
cat = self._coco.loadCats([cat_id])[0]
return cat["name"]
def _extract_data(self):
self._image_names = [
self._coco.loadImgs(img_id)[0]["file_name"]
for img_id in self._img_ids
]
self._detections = {}
for ind, (coco_image_id, image_name) in enumerate(zip(self._img_ids, self._image_names)):
image = self._coco.loadImgs(coco_image_id)[0]
bboxes = []
categories = []
for cat_id in self._cat_ids:
annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
annotations = self._coco.loadAnns(annotation_ids)
category = self._coco_to_class_map[cat_id]
for annotation in annotations:
bbox = np.array(annotation["bbox"])
bbox[[2, 3]] += bbox[[0, 1]]
bboxes.append(bbox)
categories.append(category)
self._detections[image_name] = [{
'bbox': bbox.astype(np.float32),
'category_id': category,
'category_name': self.class_name(category)
} for bbox, category in zip(bboxes, categories)]
def __getitem__(self, ind: int) -> Any:
image_name = self._image_names[ind]
return {
'image_name': os.path.join(self._image_dir, image_name),
'detections': self._detections[image_name]
}
def __len__(self) -> int:
return len(self._img_ids)
def get_num_classes(self) -> int:
return len(self._cat_ids)
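# Minimal usage sketch (paths are hypothetical; on the first run a
# ".cache" pickle is written next to the annotation file, later runs
# load it directly):
#
# dataset = DetectionMSCOCODataset(
#     annotation_file="annotations/instances_val2017.json",
#     image_dir="val2017",
# )
# sample = dataset[0]
# sample["image_name"]   # image_dir joined with the COCO file name
# sample["detections"]   # list of {'bbox', 'category_id', 'category_name'} dicts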
|
5671
|
from django.contrib import admin
from wouso.core.security.models import Report
admin.site.register(Report)
|
5673
|
from __future__ import annotations
from typing import TypeVar, Generic, Callable, Optional, Any, cast, Tuple
import rx
from returns import pipeline
from returns.functions import identity
from returns.maybe import Maybe, Nothing
from rx import Observable
from rx.subject import BehaviorSubject
from . import ReactiveValue, ReactiveView
from .value import Modifier
T = TypeVar("T")
class ReactiveProperty(Generic[T], ReactiveValue[T]):
def __init__(
self,
init_value: Maybe[T] = Nothing,
read_only=False,
modifier: Callable[[Any], Modifier] = lambda _: identity,
validator: Callable[[Any, T], T] = lambda _, v: v) -> None:
super().__init__(read_only)
self._init_value = init_value
self._modifier = modifier
self._validator = validator
@property
def init_value(self) -> Maybe[T]:
return self._init_value
@property
def validator(self) -> Callable[[Any, T], T]:
return self._validator
@property
def modifier(self) -> Callable[[Any], Modifier]:
return self._modifier
def as_view(self) -> ReactiveView[T]:
return ReactiveView(self.context, self.read_only)
def pipe(self, modifiers: Callable[[Any], Tuple[Modifier, ...]]) -> ReactiveProperty:
def stack(obj: Any):
# FIXME: Not sure why both PyCharm and Mypy fails to resolve pipeline.pipe(). Should investigate later.
# noinspection PyUnresolvedReferences
return pipeline.pipe(*([self.modifier(obj)] + list(modifiers(obj)))) # type:ignore
return ReactiveProperty(self.init_value, self.read_only, stack, self.validator)
def validate(self, validator: Callable[[Any, T], T]) -> ReactiveProperty[T]:
if validator is None:
raise ValueError("Argument 'modifier' is required.")
def validate(obj: Any, v: T) -> T:
return validator(obj, self.validator(obj, v))
return ReactiveProperty(self.init_value, self.read_only, self.modifier, validate)
class PropertyData(ReactiveValue.Data[T]):
def __init__(
self,
name: str,
init_value: Maybe[T],
modifier: Modifier,
validator: Callable[[T], T]):
assert name is not None
assert init_value is not None
assert modifier is not None
assert validator is not None
self._validator = validator
self._property: Optional[BehaviorSubject] = None
obs: Observable
if init_value != Nothing:
self._property = BehaviorSubject(init_value.map(validator).unwrap())
obs = self._property
else:
obs = rx.empty()
super().__init__(name, obs, modifier)
# Must override to appease Mypy... I hate Python.
@property
def value(self) -> T:
return super().value
@value.setter
def value(self, value: T):
self._check_disposed()
if self.initialized:
assert self._property is not None
self._property.on_next(self.validator(value))
else:
self._property = BehaviorSubject(self.validator(value))
self.observable = self._property
@property
def validator(self) -> Callable[[T], T]:
return self._validator
def dispose(self) -> None:
assert self._property is not None
self._check_disposed()
self._property.on_completed()
super().dispose()
def _create_data(self, obj: Any) -> PropertyData:
assert obj is not None
assert self.name is not None
def validate(v: T) -> T:
return self.validator(obj, v)
return self.PropertyData(self.name, self.init_value, self.modifier(obj), validate)
def _get_data(self, obj: Any) -> PropertyData:
assert obj is not None
return cast(ReactiveProperty.PropertyData, super()._get_data(obj))
def _set_value(self, obj: Any, data: ReactiveValue.Data, value: Any) -> None:
assert obj is not None
assert isinstance(data, ReactiveProperty.PropertyData)
data.value = value
|
5676
|
import time
import Queue
import random
import socket
import struct
import logging
import threading
from convert import *
from protocol import ethernet, ip, tcp, udp
ETH_P_IP = 0x0800 # IP protocol
ETH_P_ALL = 0x0003 # Every packet
NSCRIPT_PATH = 'nscript' # NSCRIPT PATH
PAYLOAD = {
53:('\x5d\x0d\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06'
'google\x03com\x00\x00\x01\x00\x01'), # 'google.com' DNS Lookup
161:('\x30\x26\x02\x01\x01\x04\x06public\xa1\x19\x02'
'\x04\x56\x9f\x5a\xdd\x02\x01\x00\x02\x01\x00\x30\x0b\x30\x09\x06'
'\x05\x2b\x06\x01\x02\x01\x05\x00'), # SNMP GetNextRequest|public|2c version|1.3.6.1.2.1
123:('\x17\x00\x02\x05'), # NTP systats commands lacks 38 null bytes (just to save bandwidth)
1900:('M-SEARCH * HTTP/1.1\r\nHOST: 192.168.127.12:1900\r\n'
'MAN: "ssdp:discover"\r\nMX: 2\r\nST: ssdp:all\r\n\r\n')
}
class Generator(object):
def __init__(self, size):
self.size = size
self.inc = size/4
if self.inc<1:
self.inc = 1
self.base = -self.inc
self.num = self.base
self.index = 0
def __iter__(self):
return self
def next(self):
if (self.num+self.inc)>=self.size:
self.next_index()
self.next_base()
self.num = self.num + self.inc
return self.num
def next_base(self):
self.base = 0
self.base-= self.index
self.num = self.base
def next_index(self):
self.index+=1
if self.index>=self.inc:
raise StopIteration
def suspend(self):
return self.size, self.inc, self.base, self.num, self.index
def resume(self, size, inc, base, num, index):
self.size = size
self.inc = inc
self.base = base
self.num = num
self.index = index
class ScriptEngine(object):
def __init__(self, imports):
self.imports = imports
self.event = threading.Event()
self.queues = {}
self.thread = []
def Load(self):
for script in self.imports:
q = Queue.Queue()
s = __import__('{}.{}'.format(NSCRIPT_PATH, script),
fromlist=[NSCRIPT_PATH])
t = threading.Thread(target=s.run,
args=(q, self.event))
self.thread.append(t)
t.setDaemon(True)
t.start()
self.queues[script] = q
def Feed(self, host, port):
for scr in self.imports:
for r in self.imports[scr]:
if port in xrange(r[0], r[1]):
self.queues[scr].put((host, port))
break
def Cleanup(self):
while Alive(self.thread):
time.sleep(10)
class nscan(object):
def __init__(self, options):
self.options = options
self.hosts = self.split(options.hosts, options.threads)
self.ports = options.ports
self.srcp = random.randint(1, 65535)#self.PickPort() # source port
self.smac = options.smac
self.dmac = options.dmac
self.ifname = options.ifname
self.siface = options.siface
self.diface = options.diface
self.banner = options.banner
self.count = options.count
self.cooldown = options.cooldown
self.queue = Queue.Queue()
if options.stype.upper()=='U':
self.stype = socket.IPPROTO_UDP
else:
self.stype = socket.IPPROTO_TCP
self.events = {
'send': threading.Event(),
'recv': threading.Event()}
self.threads = {
'send': [],
'recv': None}
def __Transport(self, src, dst=0):
if self.stype==socket.IPPROTO_TCP:
transport = tcp.TCP(src, dst)
transport.seqn = 0xDEADC0DE
else:
transport = udp.UDP(src, dst)
return transport
def __Pack(self, transport, src, dst):
if self.stype==socket.IPPROTO_TCP:
transport.payload = ''
else:
transport.payload = PAYLOAD.get(transport.dstp, '\x00\r\n\r\n')
packed = transport.pack(src, dst)
return packed + transport.payload
def __CookieCheck(self, data):
check = False
dstp = struct.unpack('!H', data[22:24])[0]
if self.stype==socket.IPPROTO_UDP:
if dstp==self.srcp:
check = True
else:
ackn = struct.unpack('!L', data[28:32])[0]
flags = struct.unpack('B', data[33])[0] & 0b010010 # SYN-ACK
if dstp==self.srcp and ackn==0xDEADC0DF and flags==18:
check = True
return check
def init(self):
generators = []
for h in self.hosts:
g = Generator(h[1]-h[0])
generators.append(g)
t = threading.Thread(target=self.send, args=(h, self.srcp, g))
t.setDaemon(True)
self.threads['send'].append(t)
t = threading.Thread(target=self.recv)
t.setDaemon(True)
self.threads['recv'] = t
if 'resume' in dir(self.options):
i = 0
for g in generators:
g.resume(*self.options.indexes[i])
i+=1
return self.threads, self.events, self.queue, generators
def run(self):
self.events['send'].set()
self.events['recv'].set()
for t in self.threads['send']:
t.start()
self.threads['recv'].start()
def send(self, hosts, srcp, gen):
if 'ppp' in self.ifname:
family = socket.AF_INET
proto = socket.IPPROTO_RAW
eth = ''
else:
family = socket.AF_PACKET
proto = ETH_P_IP
eth = ethernet.ETHER(mac2byte(self.smac), mac2byte(self.dmac), ETH_P_IP).pack()
sock = socket.socket(family, socket.SOCK_RAW, proto)
transport = self.__Transport(srcp, 0)
npacket = 0
self.events['send'].wait()
target = hosts[0]
while self.events['send'].isSet():
try:
target = hosts[0] + gen.next()
iph = ip.IP(self.diface, dec2dot(target), self.stype)
except StopIteration:
break
for port_list in self.ports:
for port in range(port_list[0], port_list[1]):
if self.events['send'].isSet():
transport.dstp = port
packet = eth + iph.pack() + self.__Pack(transport, iph.src, iph.dst) #tcph.pack(iph.src, iph.dst)
sock.sendto(packet, (dec2dot(target), 0)) # self.ifname
npacket+=1
if not npacket%self.cooldown[0]:
time.sleep(self.cooldown[1])
else:
break
logging.info('[SEND] Sent: {} packets'.format(npacket))
sock.close()
def recv(self):
sock = socket.socket(socket.AF_INET,
socket.SOCK_RAW,
self.stype)
sock.bind(('', self.srcp))
sock.settimeout(5)
self.events['recv'].wait()
counter = 0
while self.events['recv'].isSet():
try:
data, sa_ll = sock.recvfrom(65535)
if self.__CookieCheck(data):
self.queue.put(Extract(data))
counter += 1
if counter==self.count:
self.events['send'].clear()
break
except socket.timeout:
continue
sock.close()
logging.info('[RECV] Received: {} packets'.format(counter))
def split(self, hosts, n):
'''
Split host range into n parts (multithreaded)
'''
nhosts = hosts[1] - hosts[0] # number of hosts
nparts = nhosts/n + 1
host_parts = []
start = hosts[0]
while True:
if len(host_parts)<n-1:
end = start + nparts
host_parts.append((start, end))
start = end
else:
host_parts.append((start, hosts[1]))
break
return host_parts
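# Worked example for split(): with hosts=(0, 100) and n=4 threads the
# (Python 2) integer arithmetic gives nparts = 100/4 + 1 = 26, so the
# resulting parts are (0, 26), (26, 52), (52, 78) and (78, 100).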
def PickPort(self):
while True:
srcp = random.randrange(10000, 65535)
if srcp not in self.sport:
self.sport.append(srcp)
break
return srcp
def Extract(packet):
src = socket.inet_ntoa(packet[12:16])
srcp = struct.unpack('!H', packet[20:22])[0]
return src, srcp
def Alive(thread_list):
''' check if thread is alive '''
alive = False
for t in thread_list:
if t.isAlive():
alive = True
break
return alive
|
5692
|
import warnings
import numba
import numpy as np
import strax
import straxen
DEFAULT_MAX_SAMPLES = 20_000
@straxen.mini_analysis(requires=('records',),
warn_beyond_sec=10,
default_time_selection='touching')
def records_matrix(records, time_range, seconds_range, config, to_pe,
max_samples=DEFAULT_MAX_SAMPLES,
ignore_max_sample_warning=False):
"""Return (wv_matrix, times, pms)
- wv_matrix: (n_samples, n_pmt) array with per-PMT waveform intensity in PE/ns
- times: time labels in seconds (corr. to rows)
- pmts: PMT numbers (corr. to columns)
Both times and pmts have one extra element.
:param max_samples: Maximum number of time samples. If window and dt
conspire to exceed this, waveforms will be downsampled.
:param ignore_max_sample_warning: If True, suppress warning when this happens.
Example:
wvm, ts, ys = st.records_matrix(run_id, seconds_range=(1., 1.00001))
plt.pcolormesh(ts, ys, wvm.T,
norm=matplotlib.colors.LogNorm())
plt.colorbar(label='Intensity [PE / ns]')
"""
if len(records):
dt = records[0]['dt']
samples_per_record = len(records[0]['data'])
else:
# Defaults here do not matter, nothing will be plotted anyway
        dt = 10
        samples_per_record = 110
record_duration = samples_per_record * dt
window = time_range[1] - time_range[0]
if window / dt > max_samples:
with np.errstate(divide='ignore', invalid='ignore'):
# Downsample. New dt must be
# a) multiple of old dt
dts = np.arange(0, record_duration + dt, dt).astype(np.int)
# b) divisor of record duration
dts = dts[record_duration / dts % 1 == 0]
# c) total samples < max_samples
dts = dts[window / dts < max_samples]
if len(dts):
# Pick lowest dt that satisfies criteria
dt = dts.min()
else:
# Records will be downsampled to single points
dt = max(record_duration, window // max_samples)
if not ignore_max_sample_warning:
warnings.warn(f"Matrix would exceed max_samples {max_samples}, "
f"downsampling to dt = {dt} ns.")
wvm = _records_to_matrix(
records,
t0=time_range[0],
n_channels=config['n_tpc_pmts'],
dt=dt,
window=window)
wvm = wvm.astype(np.float32) * to_pe.reshape(1, -1) / dt
# Note + 1, so data for sample 0 will range from 0-1 in plot
ts = (np.arange(wvm.shape[0] + 1) * dt / int(1e9) + seconds_range[0])
ys = np.arange(wvm.shape[1] + 1)
return wvm, ts, ys
@straxen.mini_analysis(requires=('raw_records',),
warn_beyond_sec=3e-3,
default_time_selection='touching')
def raw_records_matrix(context, run_id, raw_records, time_range,
ignore_max_sample_warning=False,
max_samples=DEFAULT_MAX_SAMPLES,
**kwargs):
# Convert raw to records. We may not be able to baseline correctly
# at the start of the range due to missing zeroth fragments
records = strax.raw_to_records(raw_records)
strax.baseline(records, allow_sloppy_chunking=True)
strax.zero_out_of_bounds(records)
return context.records_matrix(run_id=run_id,
records=records,
time_range=time_range,
max_samples=max_samples,
ignore_max_sample_warning=ignore_max_sample_warning,
**kwargs)
@numba.njit
def _records_to_matrix(records, t0, window, n_channels, dt=10):
n_samples = (window // dt) + 1
# Use 32-bit integers, so downsampling saturated samples doesn't
# cause wraparounds
# TODO: amplitude bit shift!
y = np.zeros((n_samples, n_channels),
dtype=np.int32)
if not len(records):
return y
samples_per_record = len(records[0]['data'])
for r in records:
        if r['channel'] >= n_channels:  # skip channels that do not fit in the matrix
continue
if dt >= samples_per_record * r['dt']:
# Downsample to single sample -> store area
idx = (r['time'] - t0) // dt
if idx >= len(y):
print(len(y), idx)
raise IndexError('Despite n_samples = window // dt + 1, our '
'idx is too high?!')
y[idx, r['channel']] += r['area']
continue
# Assume out-of-bounds data has been zeroed, so we do not
# need to do r['data'][:r['length']] here.
# This simplifies downsampling.
w = r['data'].astype(np.int32)
if dt > r['dt']:
# Downsample
duration = samples_per_record * r['dt']
assert duration % dt == 0, "Cannot downsample fractionally"
# .astype here keeps numba happy ... ??
w = w.reshape(duration // dt, -1).sum(axis=1).astype(np.int32)
elif dt < r['dt']:
raise ValueError("Upsampling not yet implemented")
(r_start, r_end), (y_start, y_end) = strax.overlap_indices(
r['time'] // dt, len(w),
t0 // dt, n_samples)
# += is paranoid, data in individual channels should not overlap
# but... https://github.com/AxFoundation/strax/issues/119
y[y_start:y_end, r['channel']] += w[r_start:r_end]
return y
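# Illustrative sketch (not part of straxen) of the downsampling-dt selection done
# in records_matrix above; the numbers are assumptions: 110 samples of 10 ns per
# record and a 1 ms window with max_samples = 20_000.
#
#   import numpy as np
#   dt, samples_per_record, window, max_samples = 10, 110, 1_000_000, 20_000
#   record_duration = samples_per_record * dt                    # 1100 ns
#   candidates = np.arange(dt, record_duration + dt, dt)         # multiples of old dt
#   candidates = candidates[record_duration % candidates == 0]   # divisors of record duration
#   candidates = candidates[window / candidates < max_samples]   # few enough samples
#   new_dt = candidates.min() if len(candidates) else max(record_duration,
#                                                         window // max_samples)
#   # -> new_dt == 100 ns for these numbers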
|
5707
|
from collections import defaultdict
import json
import re
import redis
import threading
import time
import traceback
import uuid
import base64
import binascii
TTL = 2
hash_keys = ('cmd', 'user')
cmd_hash_keys = {
'comment': ('addr',),
'extra_comment': ('addr',),
'area_comment': ('addr',),
'rename': ('addr',),
'stackvar_renamed': ('addr', 'offset', 'name',),
'struc_created': ('struc_name', 'is_union',),
'struc_deleted': ('struc_name',),
'struc_renamed': ('old_name', 'new_name',),
'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',),
'struc_member_deleted': ('struc_name', 'offset',),
'struc_member_renamed': ('struc_name', 'offset', 'member_name',),
'struc_member_changed': ('struc_name', 'offset', 'size',),
}
key_dec = {
'c': 'cmd',
'a': 'addr',
'u': 'user',
't': 'text',
'i': 'uuid',
'b': 'blocks'
}
key_enc = dict((v, k) for k, v in key_dec.items())
nick_filter = re.compile(r'[^a-zA-Z0-9_\-]')
def decode(data):
d = json.loads(data)
return dict((key_dec.get(k, k), v) for k, v in d.items())
def dtokey(d):
return tuple(((k, v) for k, v in sorted(d.items()) if k not in ('user', 'ts', 'uuid')))
def remove_ttl(a):
now = time.time()
return [d for d in a if now - d[0] < TTL]
class Client:
def __init__(self, host, port, nick, password=None):
self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5)
        self.r.info()  # fail fast if the Redis server is unreachable or auth fails
self.nick = nick_filter.sub('_', nick)
self.ps = {}
self.nolock = threading.Lock()
self.nosend = defaultdict(list)
self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii'))
def debounce(self, no, data):
dkey = dtokey(data)
now = time.time()
with self.nolock:
for data in no:
ts = data[0]
key = data[1:]
if dkey == key and now - ts < TTL:
no.remove(data)
return True
return False
def _sub_thread(self, ps, cb, key):
for item in ps.listen():
try:
if item['type'] == 'message':
data = decode(item['data'])
if 'user' in data:
data['user'] = nick_filter.sub('_', data['user'])
# reject our own messages
if data.get('uuid') == self.uuid:
continue
with self.nolock:
self.nosend[key] = remove_ttl(self.nosend[key])
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data)
elif item['type'] == 'subscribe':
decoded = []
for data in self.r.lrange(key, 0, -1):
try:
decoded.append(decode(data))
except Exception:
print('error decoding history', data)
traceback.print_exc()
state = []
dedup = set()
for data in reversed(decoded):
cmd = data.get('cmd')
if cmd:
keys = hash_keys + cmd_hash_keys.get(cmd, ())
hashkey = tuple([str(data.get(k)) for k in keys])
if all(hashkey):
if hashkey in dedup:
continue
dedup.add(hashkey)
state.append(data)
for data in reversed(state):
try:
with self.nolock:
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data, replay=True)
except Exception:
print('error replaying history', data)
traceback.print_exc()
else:
print('unknown redis push', item)
except Exception:
print('error processing item', item)
traceback.print_exc()
def join(self, key, cb):
ps = self.r.pubsub()
ps.subscribe(key)
t = threading.Thread(target=self._sub_thread, args=(ps, cb, key))
t.daemon = True
t.start()
self.ps[key] = ps
self.publish(key, {'cmd': 'join'}, perm=False)
def leave(self, key):
ps = self.ps.pop(key, None)
if ps:
ps.unsubscribe(key)
def publish(self, key, data, perm=True, send_uuid=True):
if self.debounce(self.nosend[key], data):
return
data['user'] = self.nick
data['ts'] = self.r.time()[0]
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
if perm:
self.r.rpush(key, data)
self.r.publish(key, data)
def push(self, key, data, send_uuid=True):
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
self.r.lpush(key, data)
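# Illustrative usage sketch: it assumes a reachable Redis server at localhost:6379
# and a channel name 'demo_project'; both, like the sample command, are placeholders.
if __name__ == '__main__':
    def on_event(key, data, replay=False):
        # replay is True for history replayed right after subscribing
        print(key, data, 'replay' if replay else 'live')

    client = Client('localhost', 6379, nick='alice')
    client.join('demo_project', on_event)
    client.publish('demo_project', {'cmd': 'comment', 'addr': 0x401000, 'text': 'looks fishy'})
    time.sleep(2)  # give the subscriber thread a moment to deliver messages
    client.leave('demo_project')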
|
5712
|
import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons,object)
datas = np.empty(nlats*nlons,object)
nn = 0
for n in range(nlats*nlons):
nn = nn + 1
data[n] = np.arange(nn,dtype=VL_BASETYPE)
datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])
data = np.reshape(data,(nlats,nlons))
datas = np.reshape(datas,(nlats,nlons))
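# Note on the generated fixtures: data[n] holds an int16 ramp of length n + 1 and
# datas[n] the first n + 2 lowercase letters, e.g. data[2] == array([0, 1, 2]) and
# datas[2] == 'abcd'; both are then reshaped to (nlats, nlons).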
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
ragged = f.createVariable(VAR1_NAME, vlen_t,\
(DIM2_NAME,DIM1_NAME))
strings = f.createVariable(VAR2_NAME, str,
(DIM2_NAME,DIM1_NAME))
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
string_scalar = f.createVariable(VAR4_NAME,str,())
vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
ragged[:] = data
ragged[-1,-1] = data[-1,-1]
strings[:] = datas
strings[-2,-2] = datas[-2,-2]
strings_alt[:] = datas.astype(str)
string_scalar[...] = 'foo' #issue458
vlen_scalar[...] = np.array([1,2,3],np.int16)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR1_NAME]
vs = f.variables[VAR2_NAME]
vs_alt = f.variables[VAR3_NAME]
assert list(f.vltypes.keys()) == [VL_NAME]
assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
assert f.variables['string_scalar'][...] == 'foo'
assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
data2 = v[:]
data2s = vs[:]
for i in range(nlons):
for j in range(nlats):
assert_array_equal(data2[j,i], data[j,i])
assert datas[j,i] == data2s[j,i]
assert_array_equal(datas, vs_alt[:])
f.close()
class TestInvalidDataType(unittest.TestCase):
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
f.createDimension('x', 1)
        # using assertRaisesRegexp as a context manager
# only works with python >= 2.7 (issue #497)
#with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
# f.createVariable('foo', str, ('x',))
try:
f.createVariable('foo', str, ('x',))
except ValueError:
pass
f.close()
os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
# issue 333
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF4')
teststring = f.createVariable('teststring', str)
stringout = "yyyymmdd_hhmmss"
teststring[()] = stringout
f.close()
f = Dataset(FILE_NAME)
assert f.variables['teststring'][:] == stringout
f.close()
os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
# issue 526
def runTest(self):
strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
strtest.createDimension('tenstrings', 10)
strtest.createVariable('tenstrings', str, ['tenstrings'])
strtest['tenstrings'][np.int32(5)] = 'asdf'
strtest['tenstrings'][6.0] = 'asdf'
strtest.close()
f = Dataset(FILE_NAME)
assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
assert f.variables['tenstrings'][6.0] == 'asdf'
f.close()
os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
strings_alt[:] = datas.astype(str)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
vs_alt = f.variables[VAR3_NAME]
unicode_strings = vs_alt[:]
fancy_indexed = unicode_strings[0][[1,2,4]]
assert fancy_indexed[0] == 'abc'
assert fancy_indexed[1] == 'abcd'
assert fancy_indexed[2] == 'abcdef'
f.close()
class VlenAppendTestCase(unittest.TestCase):
def setUp(self):
import netCDF4
if netCDF4.__netcdf4libversion__ < "4.4.1":
self.skip = True
try:
self.skipTest("This test requires NetCDF 4.4.1 or later.")
except AttributeError:
# workaround for Python 2.6 (skipTest(reason) is new
# in Python 2.7)
pass
else:
self.skip = False
self.file = FILE_NAME
f = Dataset(self.file, 'w')
vlen_type = f.createVLType(np.float64, 'vltest')
f.createDimension('x', None)
v = f.createVariable('vl', vlen_type, 'x')
w = f.createVariable('vl2', np.float64, 'x')
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing appending to vlen variables (issue #527)."""
# workaround for Python 2.6
if self.skip:
return
f = Dataset(self.file, 'a')
w = f.variables["vl2"]
v = f.variables["vl"]
w[0:3] = np.arange(3, dtype=np.float64)
v[0] # sometimes crashes
v[0].tolist() # sometimes crashes
v[0].size # BOOM!
f.close()
class Vlen_ScaledInts(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
nc = Dataset(self.file, 'w')
vlen_type = nc.createVLType(np.uint8, 'vltest')
nc.createDimension('x', None)
v = nc.createVariable('vl', vlen_type, 'x')
v.scale_factor = 1./254.
v.missing_value=np.array(255,np.uint8)
# random lengths between 1 and 1000
ilen = np.random.randint(1,1000,size=100)
n = 0
for nlen in ilen:
data = np.random.uniform(low=0.0, high=1.0, size=nlen)
v[n] = data
if n==99: self.data = data
n += 1
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing packing float vlens as scaled integers (issue #1003)."""
nc = Dataset(self.file)
data = nc['vl'][-1]
# check max error of compression
err = np.abs(data - self.data)
assert(err.max() < nc['vl'].scale_factor)
# turn off auto-scaling
nc.set_auto_maskandscale(False)
data = nc['vl'][-1]
assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
nc.close()
if __name__ == '__main__':
unittest.main()
|
5726
|
import uuid
import pickle
import pytest
import argparse
from collections import namedtuple
from six import text_type
from allure.common import AllureImpl, StepContext
from allure.constants import Status, AttachmentType, Severity, \
FAILED_STATUSES, Label, SKIPPED_STATUSES
from allure.utils import parent_module, parent_down_from_module, labels_of, \
all_of, get_exception_message, now, mangle_testnames
from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel
def pytest_addoption(parser):
parser.getgroup("reporting").addoption('--alluredir',
action="store",
dest="allurereportdir",
metavar="DIR",
default=None,
help="Generate Allure report in the specified directory (may not exist)")
severities = [v for (_, v) in all_of(Severity)]
def label_type(name, legal_values=set()):
"""
argparse-type factory for labelish things.
processed value is set of tuples (name, value).
:param name: of label type (for future TestLabel things)
        :param legal_values: a `set` of values that are legal for this label, if it is limited at all
        :raises ArgumentTypeError: if `legal_values` are given and some values fall outside of them
"""
def a_label_type(string):
atoms = set(string.split(','))
if legal_values and not atoms < legal_values:
raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values)))
return set((name, v) for v in atoms)
return a_label_type
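    # Illustrative example (comment only): a parser option configured with
    #   type=label_type(name=Label.SEVERITY, legal_values={'critical', 'blocker'})
    # turns the command-line string 'critical,blocker' into
    #   {(Label.SEVERITY, 'critical'), (Label.SEVERITY, 'blocker')}
    # and raises argparse.ArgumentTypeError for any value outside legal_values.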
parser.getgroup("general").addoption('--allure_severities',
action="store",
dest="allureseverities",
metavar="SEVERITIES_SET",
default={},
type=label_type(name=Label.SEVERITY, legal_values=set(severities)),
help="""Comma-separated list of severity names.
Tests only with these severities will be run.
Possible values are:%s.""" % ', '.join(severities))
parser.getgroup("general").addoption('--allure_features',
action="store",
dest="allurefeatures",
metavar="FEATURES_SET",
default={},
type=label_type(name=Label.FEATURE),
help="""Comma-separated list of feature names.
Run tests that have at least one of the specified feature labels.""")
parser.getgroup("general").addoption('--allure_stories',
action="store",
dest="allurestories",
metavar="STORIES_SET",
default={},
type=label_type(name=Label.STORY),
help="""Comma-separated list of story names.
Run tests that have at least one of the specified story labels.""")
def pytest_configure(config):
reportdir = config.option.allurereportdir
if reportdir: # we actually record something
allure_impl = AllureImpl(reportdir)
testlistener = AllureTestListener(config)
pytest.allure._allurelistener = testlistener
config.pluginmanager.register(testlistener)
if not hasattr(config, 'slaveinput'):
# on xdist-master node do all the important stuff
config.pluginmanager.register(AllureAgregatingListener(allure_impl, config))
config.pluginmanager.register(AllureCollectionListener(allure_impl))
class AllureTestListener(object):
"""
Per-test listener.
Is responsible for recording in-test data and for attaching it to the test report thing.
The per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook.
"""
def __init__(self, config):
self.config = config
self.environment = {}
self.test = None
# FIXME: that flag makes us pre-report failures in the makereport hook.
        # it is here to cope with xdist's behavior regarding -x.
# see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish
self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue("maxfail")
@pytest.mark.hookwrapper
def pytest_runtest_protocol(self, item, nextitem):
try:
# for common items
description = item.function.__doc__
except AttributeError:
# for doctests that has no `function` attribute
description = item.reportinfo()[2]
self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])),
description=description,
start=now(),
attachments=[],
labels=labels_of(item),
status=None,
steps=[],
id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish
self.stack = [self.test]
yield
self.test = None
self.stack = []
def attach(self, title, contents, attach_type):
"""
Store attachment object in current state for later actual write in the `AllureAgregatingListener.write_attach`
"""
attach = Attach(source=contents, # we later re-save those, oh my...
title=title,
type=attach_type)
self.stack[-1].attachments.append(attach)
def dynamic_issue(self, *issues):
"""
Attaches ``issues`` to the current active case
"""
if self.test:
self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues])
def description(self, description):
"""
Sets description for the test
"""
if self.test:
self.test.description = description
def start_step(self, name):
"""
        Starts a new :py:class:`allure.structure.TestStep` with the given ``name``,
pushes it to the ``self.stack`` and returns the step.
"""
step = TestStep(name=name,
title=name,
start=now(),
attachments=[],
steps=[])
self.stack[-1].steps.append(step)
self.stack.append(step)
return step
def stop_step(self):
"""
Stops the step at the top of ``self.stack``
"""
step = self.stack.pop()
step.stop = now()
def _fill_case(self, report, call, pyteststatus, status):
"""
        Finalizes the test case with important data
:param report: py.test's `TestReport`
:param call: py.test's `CallInfo`
:param pyteststatus: the failed/xfailed/xpassed thing
:param status: a :py:class:`allure.constants.Status` entry
"""
[self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()]
self.test.stop = now()
self.test.status = status
if status in FAILED_STATUSES:
self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report),
trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail)
elif status in SKIPPED_STATUSES:
skip_message = type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail
trim_msg_len = 89
short_message = skip_message.split('\n')[0][:trim_msg_len]
# FIXME: see pytest.runner.pytest_runtest_makereport
self.test.failure = Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)),
trace=status == Status.PENDING and report.longrepr or short_message != skip_message and skip_message or '')
def report_case(self, item, report):
"""
        Adds `self.test` to the `report` in an `AllureAgregatingListener`-understood way
"""
parent = parent_module(item)
        # we attach a five-tuple: (test module ID, test module name, test module doc, environment, TestCase)
report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid,
parent.module.__name__,
parent.module.__doc__ or '',
self.environment,
self.test)))
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
"""
Decides when to actually report things.
pytest runs this (naturally) three times -- with report.when being:
setup <--- fixtures are to be initialized in this one
call <--- when this finishes the main code has finished
teardown <--- tears down fixtures (that still possess important info)
`setup` and `teardown` are always called, but `call` is called only if `setup` passes.
See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas.
The "other side" (AllureAggregatingListener) expects us to send EXACTLY ONE test report (it wont break, but it will duplicate cases in the report -- which is bad.
So we work hard to decide exact moment when we call `_stop_case` to do that. This method may benefit from FSM (we keep track of what has already happened via self.test.status)
Expected behavior is:
FAILED when call fails and others OK
BROKEN when either setup OR teardown are broken (and call may be anything)
PENDING if skipped and xfailed
SKIPPED if skipped and not xfailed
"""
report = (yield).get_result()
status = self.config.hook.pytest_report_teststatus(report=report)
status = status and status[0]
if report.when == 'call':
if report.passed:
self._fill_case(report, call, status, Status.PASSED)
elif report.failed:
self._fill_case(report, call, status, Status.FAILED)
                # FIXME: this is here only to work around xdist's stupid -x thing when it exits BEFORE THE TEARDOWN test log. Meh, I should file an issue to xdist
if self._magicaldoublereport:
# to minimize ze impact
self.report_case(item, report)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'setup': # setup / teardown
if report.failed:
self._fill_case(report, call, status, Status.BROKEN)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'teardown':
# as teardown is always called for testitem -- report our status here
if not report.passed:
if self.test.status not in FAILED_STATUSES:
# if test was OK but failed at teardown => broken
self._fill_case(report, call, status, Status.BROKEN)
else:
# mark it broken so, well, someone has idea of teardown failure
# still, that's no big deal -- test has already failed
# TODO: think about that once again
self.test.status = Status.BROKEN
# if a test isn't marked as "unreported" or it has failed, add it to the report.
if not item.get_marker("unreported") or self.test.status in FAILED_STATUSES:
self.report_case(item, report)
def pytest_runtest_setup(item):
item_labels = set((l.name, l.value) for l in labels_of(item)) # see label_type
arg_labels = set().union(item.config.option.allurefeatures,
item.config.option.allurestories,
item.config.option.allureseverities)
if arg_labels and not item_labels & arg_labels:
pytest.skip('Not suitable with selected labels: %s.' % ', '.join(text_type(l) for l in sorted(arg_labels)))
class LazyInitStepContext(StepContext):
"""
This is a step context used for decorated steps.
    It makes it possible to create step decorators before pytest_configure has run, when no AllureListener has been initiated yet.
"""
def __init__(self, allure_helper, title):
self.allure_helper = allure_helper
self.title = title
self.step = None
@property
def allure(self):
listener = self.allure_helper.get_listener()
# if listener has `stack` we are inside a test
# record steps only when that
# FIXME: this breaks encapsulation a lot
if hasattr(listener, 'stack'):
return listener
class AllureHelper(object):
"""
This object holds various utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach``
"""
def __init__(self):
self._allurelistener = None # FIXME: this gets injected elsewhere, like in the pytest_configure
def get_listener(self):
return self._allurelistener
def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment
"""
Attaches ``contents`` to a current context with given ``name`` and ``type``.
"""
if self._allurelistener:
self._allurelistener.attach(name, contents, type)
def label(self, name, *value):
"""
A decorator factory that returns ``pytest.mark`` for a given label.
"""
allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name))
return allure_label(*value)
def severity(self, severity):
"""
        A decorator factory that returns ``pytest.mark`` for a given allure ``severity`` level.
"""
return self.label(Label.SEVERITY, severity)
def feature(self, *features):
"""
        A decorator factory that returns ``pytest.mark`` for the given features.
"""
return self.label(Label.FEATURE, *features)
def story(self, *stories):
"""
        A decorator factory that returns ``pytest.mark`` for the given stories.
"""
return self.label(Label.STORY, *stories)
def issue(self, *issues):
"""
        A decorator factory that returns ``pytest.mark`` for the given issues.
"""
return self.label(Label.ISSUE, *issues)
def dynamic_issue(self, *issues):
"""
Mark test ``issues`` from inside.
"""
if self._allurelistener:
self._allurelistener.dynamic_issue(*issues)
def description(self, description):
"""
Sets description for the test
"""
if self._allurelistener:
self._allurelistener.description(description)
def testcase(self, *testcases):
"""
        A decorator factory that returns ``pytest.mark`` for the given testcases.
"""
return self.label(Label.TESTCASE, *testcases)
def step(self, title):
"""
A contextmanager/decorator for steps.
TODO: when moving to python 3, rework this with ``contextlib.ContextDecorator``.
Usage examples::
import pytest
def test_foo():
with pytest.allure.step('mystep'):
assert False
@pytest.allure.step('make test data')
def make_test_data_bar():
raise ValueError('No data today')
def test_bar():
assert make_test_data_bar()
@pytest.allure.step
def make_test_data_baz():
raise ValueError('No data today')
def test_baz():
assert make_test_data_baz()
@pytest.fixture()
@pytest.allure.step('test fixture')
def steppy_fixture():
return 1
def test_baz(steppy_fixture):
assert steppy_fixture
"""
if callable(title):
return LazyInitStepContext(self, title.__name__)(title)
else:
return LazyInitStepContext(self, title)
def single_step(self, text):
"""
Writes single line to report.
"""
if self._allurelistener:
with self.step(text):
pass
def environment(self, **env_dict):
if self._allurelistener:
self._allurelistener.environment.update(env_dict)
@property
def attach_type(self):
return AttachmentType
@property
def severity_level(self):
return Severity
def __getattr__(self, attr):
"""
Provides fancy shortcuts for severity::
# these are the same
pytest.allure.CRITICAL
pytest.allure.severity(pytest.allure.severity_level.CRITICAL)
"""
if attr in dir(Severity) and not attr.startswith('_'):
return self.severity(getattr(Severity, attr))
else:
raise AttributeError
MASTER_HELPER = AllureHelper()
def pytest_namespace():
return {'allure': MASTER_HELPER}
class AllureAgregatingListener(object):
"""
Listens to pytest hooks to generate reports for common tests.
"""
def __init__(self, impl, config):
self.impl = impl
# module's nodeid => TestSuite object
self.suites = {}
def pytest_sessionfinish(self):
"""
We are done and have all the results in `self.suites`
Lets write em down.
But first we kinda-unify the test cases.
        We expect cases to come from AllureTestListener -- and they have an `id` field to manifest their identity.
        Of all the test cases in suite.tests we keep only the LAST one with the same ID -- because logreport can be sent MORE THAN ONE TIME
        (namely, if the test fails and then gets broken -- to cope with xdist's -x behavior we have to have tests even at CALL failures)
TODO: do it in a better, more efficient way
"""
for s in self.suites.values():
if s.tests: # nobody likes empty suites
s.stop = max(case.stop for case in s.tests)
known_ids = set()
refined_tests = []
for t in s.tests[::-1]:
if t.id not in known_ids:
known_ids.add(t.id)
refined_tests.append(t)
s.tests = refined_tests[::-1]
with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f:
self.impl._write_xml(f, s)
self.impl.store_environment()
def write_attach(self, attachment):
"""
        Writes an attachment object from the `AllureTestListener` to the FS, fixing its fields
:param attachment: a :py:class:`allure.structure.Attach` object
"""
# OMG, that is bad
attachment.source = self.impl._save_attach(attachment.source, attachment.type)
attachment.type = attachment.type.mime_type
def pytest_runtest_logreport(self, report):
if hasattr(report, '_allure_result'):
module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result)
report._allure_result = None # so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98
self.impl.environment.update(environment)
for a in testcase.iter_attachments():
self.write_attach(a)
self.suites.setdefault(module_id, TestSuite(name=module_name,
description=module_doc,
tests=[],
labels=[],
start=testcase.start, # first case starts the suite!
stop=None)).tests.append(testcase)
CollectFail = namedtuple('CollectFail', 'name status message trace')
class AllureCollectionListener(object):
"""
Listens to pytest collection-related hooks
to generate reports for modules that failed to collect.
"""
def __init__(self, impl):
self.impl = impl
self.fails = []
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
status = Status.BROKEN
else:
status = Status.CANCELED
self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split("::"))[-1],
status=status,
message=get_exception_message(None, None, report),
trace=report.longrepr))
def pytest_sessionfinish(self):
"""
Creates a testsuite with collection failures if there were any.
"""
if self.fails:
self.impl.start_suite(name='test_collection_phase',
title='Collection phase',
description='This is the tests collection phase. Failures are modules that failed to collect.')
for fail in self.fails:
self.impl.start_case(name=fail.name.split(".")[-1])
self.impl.stop_case(status=fail.status, message=fail.message, trace=fail.trace)
self.impl.stop_suite()
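# Illustrative invocation (paths and label values are placeholders): the plugin is
# activated by pointing pytest at a report directory, e.g.
#   py.test tests/ --alluredir=/tmp/allure-results --allure_severities=critical,blocker
# after which the generated *-testsuite.xml files are consumed by the Allure report
# generator.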
|
5734
|
from matplotlib.colors import ListedColormap
cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50'])
cm2 = ListedColormap(['#0000aa', '#ff2020'])
|
5754
|
import sys
sys.path.append('../../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generate_gif import generate_from_flame_sequence
from my_utils.generic_utils import save_set_of_images
from my_utils import compute_fid
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
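# Illustrative note: with both flags set the generator condition is the textured
# render and the normal map stacked along the channel axis, e.g. two (B, 3, H, W)
# tensors become one (B, 6, H, W) input; with a single flag only that map is used,
# and with neither the raw FLAME parameter vector is passed through unchanged.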
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
root_out_dir = f'{cnst.output_root}sample/'
num_smpl_to_eval_on = 1000
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],
flame_param['tex'], flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 64
flame_decoder = overlay_visualizer.deca.flame.eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
# images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
flame_mesh_imgs = None
mdl_id = 'mdl2_'
if settings_for_runs[run_idx]['name'] == 'full_model':
mdl_id = 'mdl1_'
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
flm_batch = position_to_given_location(flame_decoder, flm_batch)
batch_size_true = flm_batch.shape[0]
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
# import ipdb; ipdb.set_trace()
images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()
save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',
images=(images + 1) / 2, show_prog_bar=True)
        # save flame render
save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',
settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',
images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)
# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy())
|
5761
|
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from dymos.utils.hermite import hermite_matrices
class TestHermiteMatrices(unittest.TestCase):
def test_quadratic(self):
# Interpolate with values and rates provided at [-1, 1] in tau space
tau_given = [-1.0, 1.0]
tau_eval = np.linspace(-1, 1, 100)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
# Provide values for y = t**2 and its time-derivative
y_given = [4.0, 4.0]
ydot_given = [-4.0, 4.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**2
ydot_computed = 2.0 * (tau_eval * dt_dtau)
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
def test_cubic(self):
        # Interpolate with values and rates provided at [-1, 0, 1] in tau space
tau_given = [-1.0, 0.0, 1.0]
tau_eval = np.linspace(-1, 1, 101)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
        # Provide values for y = t**3 and its time-derivative
y_given = [-8.0, 0.0, 8.0]
ydot_given = [12.0, 0.0, 12.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**3
ydot_computed = 3.0 * (tau_eval * dt_dtau)**2
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
if __name__ == '__main__': # pragma: no cover
unittest.main()
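# Worked note: both tests evaluate the Hermite interpolants
#   y_i    = Ai @ y_given + (dt/dtau) * Bi @ ydot_given
#   ydot_i = (dtau/dt) * Ad @ y_given + Bd @ ydot_given
# with dt/dtau = 2.0 mapping tau in [-1, 1] onto t in [-2, 2], and compare them
# against the analytic t**2 and t**3 curves and their derivatives.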
|
5778
|
import tensorflow as tf
from typing import Optional
from tf_fourier_features import fourier_features
class FourierFeatureMLP(tf.keras.Model):
def __init__(self, units: int, final_units: int, gaussian_projection: Optional[int],
activation: str = 'relu',
final_activation: str = "linear",
num_layers: int = 1,
gaussian_scale: float = 1.0,
use_bias: bool = True, **kwargs):
"""
Fourier Feature Projection model from the paper
[Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/).
Used to create a multi-layer MLP with optional FourierFeatureProjection layer.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
activation: Activation in the hidden layers.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
gaussian_projection: Projection dimension for the gaussian kernel in fourier feature
projection layer. Can be None, negative or positive integer.
If None, then fourier feature map layer is not used.
If <=0, uses identity matrix (basic projection) without gaussian kernel.
If >=1, uses gaussian projection matrix of specified dim.
gaussian_scale: Scale of the gaussian kernel in fourier feature projection layer.
                Note: If the scale is too small, convergence will slow down and results will be poor.
If the scale is too large (>50), convergence will be fast but results will be grainy.
Try grid search for scales in the range [10 - 50].
use_bias: Boolean whether to use bias or not.
# References:
- [Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/)
"""
super().__init__(**kwargs)
layers = []
if gaussian_projection is not None:
layers.append(fourier_features.FourierFeatureProjection(
gaussian_projection=gaussian_projection,
gaussian_scale=gaussian_scale,
**kwargs
))
for _ in range(num_layers - 1):
layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias,
bias_initializer='he_uniform', **kwargs))
self.network = tf.keras.Sequential(layers)
self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation,
use_bias=use_bias, bias_initializer='he_uniform', **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.network(inputs)
output = self.final_dense(features)
return output
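# Minimal usage sketch; the layer sizes, projection dimension and gaussian_scale
# below are illustrative assumptions only.
if __name__ == '__main__':
    model = FourierFeatureMLP(units=256, final_units=3, gaussian_projection=256,
                              gaussian_scale=10.0, num_layers=4)
    coords = tf.random.uniform((1024, 2))  # e.g. normalized (x, y) image coordinates
    rgb = model(coords)                    # projection + hidden layers + final dense
    print(rgb.shape)                       # -> (1024, 3)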
|
5787
|
from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
required_conan_version = ">=1.33.0"
class LibStudXmlConan(ConanFile):
name = "libstudxml"
description = "A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."
topics = ("xml", "xml-parser", "serialization")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.codesynthesis.com/projects/libstudxml/"
license = "MIT"
settings = "os", "compiler", "build_type", "arch"
exports_sources = "patches/*"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("expat/2.4.1")
def validate(self):
if self.settings.compiler == "Visual Studio":
if tools.Version(self.settings.compiler.version) < "9":
raise ConanInvalidConfiguration("Visual Studio {} is not supported.".format(self.settings.compiler.version))
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def build_requirements(self):
if self.settings.compiler != "Visual Studio":
self.build_requires("gnu-config/cci.20201022")
self.build_requires("libtool/2.4.6")
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if not self._autotools:
args = ["--with-external-expat"]
if self.options.shared:
args.extend(["--enable-shared", "--disable-static"])
else:
args.extend(["--disable-shared", "--enable-static"])
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
self._autotools.configure(configure_dir=self._source_subfolder, args=args)
return self._autotools
def _build_vs(self):
vc_ver = int(tools.Version(self.settings.compiler.version).major)
sln_path = None
def get_sln_path():
return os.path.join(self._source_subfolder, "libstudxml-vc{}.sln".format(vc_ver))
sln_path = get_sln_path()
while not os.path.exists(sln_path):
vc_ver -= 1
sln_path = get_sln_path()
proj_path = os.path.join(self._source_subfolder, "xml", "libstudxml-vc{}.vcxproj".format(vc_ver))
if not self.options.shared:
tools.replace_in_file(proj_path, "DynamicLibrary", "StaticLibrary")
tools.replace_in_file(proj_path, "LIBSTUDXML_DYNAMIC_LIB", "LIBSTUDXML_STATIC_LIB")
msbuild = MSBuild(self)
msbuild.build(sln_path, platforms={"x86": "Win32"})
@property
def _user_info_build(self):
return getattr(self, "user_info_build", self.deps_user_info)
def _build_autotools(self):
shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB,
os.path.join(self._source_subfolder, "config", "config.sub"))
shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS,
os.path.join(self._source_subfolder, "config", "config.guess"))
if self.settings.compiler.get_safe("libcxx") == "libc++":
# libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an
# include dir, libc++ ends up including their 'version' file instead, causing a compile error
tools.remove_files_by_mask(self._source_subfolder, "version")
with tools.chdir(self._source_subfolder):
self.run("{} -fiv".format(tools.get_env("AUTORECONF")), win_bash=tools.os_info.is_windows)
autotools = self._configure_autotools()
autotools.make()
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.compiler == "Visual Studio":
self._build_vs()
else:
self._build_autotools()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
self.copy("xml/value-traits", dst="include", src=self._source_subfolder)
self.copy("xml/serializer", dst="include", src=self._source_subfolder)
self.copy("xml/qname", dst="include", src=self._source_subfolder)
self.copy("xml/parser", dst="include", src=self._source_subfolder)
self.copy("xml/forward", dst="include", src=self._source_subfolder)
self.copy("xml/exception", dst="include", src=self._source_subfolder)
self.copy("xml/content", dst="include", src=self._source_subfolder)
self.copy("xml/*.ixx", dst="include", src=self._source_subfolder)
self.copy("xml/*.txx", dst="include", src=self._source_subfolder)
self.copy("xml/*.hxx", dst="include", src=self._source_subfolder)
self.copy("xml/*.h", dst="include", src=self._source_subfolder)
suffix = ""
if self.settings.arch == "x86_64":
suffix = "64"
if self.options.shared:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "lib" + suffix))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
autotools = self._configure_autotools()
autotools.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "libstudxml.la")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["pkg_config"] = "libstudxml"
# If built with makefile, static library mechanism is provided by their buildsystem already
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self.cpp_info.defines = ["LIBSTUDXML_STATIC_LIB=1"]
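# Illustrative usage (version and options are placeholders): from a checkout of this
# recipe one would typically run something like
#   conan create . libstudxml/1.1.0@ -o libstudxml:shared=True
# letting Conan pick the MSBuild or autotools path based on the active profile.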
|
5820
|
import collections
import copy
import intervaltree
from .label import Label
class LabelList:
"""
Represents a list of labels which describe an utterance.
An utterance can have multiple label-lists.
Args:
        idx (str): A unique identifier for the label-list
within a corpus for one utterance.
labels (list): The list containing the
:py:class:`audiomate.annotations.Label`.
Attributes:
utterance (Utterance): The utterance this label-list is belonging to.
label_tree (IntervalTree): The interval-tree storing the labels.
Example:
>>> label_list = LabelList(idx='transcription', labels=[
>>> Label('this', 0, 2),
>>> Label('is', 2, 4),
>>> Label('timmy', 4, 8)
>>> ])
"""
__slots__ = ['idx', 'label_tree', 'utterance']
def __init__(self, idx='default', labels=None):
self.idx = idx
self.utterance = None
self.label_tree = intervaltree.IntervalTree()
if labels is not None:
self.update(labels)
def __eq__(self, other):
data_this = (self.idx, self.label_tree)
data_other = (other.idx, other.label_tree)
return data_this == data_other
def __iter__(self):
for interval in self.label_tree:
yield interval.data
def __len__(self):
return self.label_tree.__len__()
def __copy__(self):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=[iv.data for iv in self.label_tree]
)
def __deepcopy__(self, memo):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=copy.deepcopy([iv.data for iv in self.label_tree], memo)
)
@property
def labels(self):
""" Return list of labels. """
return list(self)
@property
def start(self):
""" Return start of the earliest starting label (lower bound). """
return self.label_tree.begin()
@property
def end(self):
""" Return end of the lastly ending label (upper bound). """
return self.label_tree.end()
@property
def total_length(self):
"""
Return the cumulative length of all labels
(Number of characters).
"""
return sum(label.length for label in self.labels)
#
# Alteration
#
def add(self, label):
"""
Add a label to the end of the list.
Args:
label (Label): The label to add.
"""
label.label_list = self
self.label_tree.addi(label.start, label.end, label)
def addl(self, value, start=0.0, end=float('inf')):
""" Shortcut for ``add(Label(value, start, end))``. """
self.add(Label(value, start=start, end=end))
def update(self, labels):
"""
Add a list of labels to the end of the list.
Args:
labels (list): Labels to add.
"""
ivs = []
for label in labels:
label.label_list = self
ivs.append(intervaltree.Interval(label.start, label.end, label))
self.label_tree.update(ivs)
def apply(self, fn):
"""
Apply the given function `fn` to every label in this label list.
`fn` is a function of one argument that receives the current label
which can then be edited in place.
Args:
fn (func): Function to apply to every label
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('another_label', 2.0, 3.0)
... ])
>>> def shift_labels(label):
... label.start += 1.0
... label.end += 1.0
...
>>> ll.apply(shift_labels)
>>> ll.labels
[Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)]
"""
for label in self.labels:
fn(label)
def merge_overlaps(self, threshold=0.0):
"""
Merge overlapping labels with the same value.
Two labels are considered overlapping,
if ``l2.start - l1.end < threshold``.
Args:
threshold (float): Maximal distance between two labels
to be considered as overlapping.
(default: 0.0)
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('a_label', 1.5, 2.7),
... Label('b_label', 1.0, 2.0),
... ])
            >>> ll.merge_overlaps()
>>> ll.labels
[
Label('a_label', 1.0, 2.7),
Label('b_label', 1.0, 2.0),
]
"""
updated_labels = []
all_intervals = self.label_tree.copy()
        # recursively find a group of overlapping labels with the same value
def recursive_overlaps(interval):
range_start = interval.begin - threshold
range_end = interval.end + threshold
direct_overlaps = all_intervals.overlap(range_start, range_end)
all_overlaps = [interval]
all_intervals.discard(interval)
for overlap in direct_overlaps:
if overlap.data.value == interval.data.value:
all_overlaps.extend(recursive_overlaps(overlap))
return all_overlaps
# For every remaining interval
# - Find overlapping intervals recursively
# - Remove them
# - Create a concatenated new label
while not all_intervals.is_empty():
next_interval = list(all_intervals)[0]
overlapping = recursive_overlaps(next_interval)
ov_start = float('inf')
ov_end = 0.0
ov_value = next_interval.data.value
for overlap in overlapping:
ov_start = min(ov_start, overlap.begin)
ov_end = max(ov_end, overlap.end)
all_intervals.discard(overlap)
updated_labels.append(Label(
ov_value,
ov_start,
ov_end
))
# Replace the old labels with the updated ones
self.label_tree.clear()
self.update(updated_labels)
#
# Statistics
#
def label_total_duration(self):
"""
Return for each distinct label value the total duration of
all occurrences.
Returns:
dict: A dictionary containing for every label-value (key)
the total duration in seconds (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3, 5),
>>> Label('b', 5, 8),
>>> Label('a', 8, 10),
>>> Label('b', 10, 14),
>>> Label('a', 15, 18.5)
>>> ])
>>> ll.label_total_duration()
            {'a': 7.5, 'b': 7.0}
"""
durations = collections.defaultdict(float)
for label in self:
durations[label.value] += label.duration
return durations
def label_values(self):
"""
        Return a list of all occurring label values.
Returns:
list: Lexicographically sorted list (str) of label values.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14),
>>> Label('d', 15, 18)
>>> ])
>>> ll.label_values()
['a', 'b', 'c', 'd']
"""
all_labels = {l.value for l in self}
return sorted(all_labels)
def label_count(self):
"""
Return for each label the number of occurrences within the list.
Returns:
dict: A dictionary containing for every label-value (key)
the number of occurrences (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('a', 7.2, 10.5),
>>> Label('b', 10.5, 14),
>>> Label('a', 15, 18)
>>> ])
>>> ll.label_count()
            {'a': 3, 'b': 2}
"""
occurrences = collections.defaultdict(int)
for label in self:
occurrences[label.value] += 1
return occurrences
def all_tokens(self, delimiter=' '):
"""
        Return a set of all distinct tokens occurring in the label-list.
Args:
delimiter (str): The delimiter used to split labels into tokens.
See :meth:`audiomate.annotations.Label.tokenized`
Returns:
:class:`set`: A set of distinct tokens.
"""
tokens = set()
for label in self:
tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))
return tokens
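    # Example: for LabelList(labels=[Label('hello world', 0, 2), Label('world', 2, 4)])
    # all_tokens() returns {'hello', 'world'}, assuming Label.tokenized splits on the
    # (whitespace) delimiter.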
#
# Query Label Values
#
def join(self, delimiter=' ', overlap_threshold=0.1):
"""
Return a string with all labels concatenated together.
The order of the labels is defined by the start of the label.
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): A string to join two consecutive labels.
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A string with all labels concatenated together.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c', start=7.0, end=10.2),
>>> Label('d', start=10.3, end=14.0)
>>> ])
>>> ll.join(' - ')
'a - b - c - d'
"""
sorted_by_start = sorted(self.labels)
concat_values = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
concat_values.append(label.value)
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return delimiter.join(concat_values)
def tokenized(self, delimiter=' ', overlap_threshold=0.1):
"""
        Return an ordered list of tokens based on all labels.
        Joins all tokens from all labels (``label.tokenized()``).
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): The delimiter used to split labels into tokens.
(default: space)
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
            list: A list containing tokens of all labels ordered according
to the label order.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a d q', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c a', start=7.0, end=10.2),
>>> Label('f g', start=10.3, end=14.0)
>>> ])
>>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)
['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
"""
sorted_by_start = sorted(self.labels)
tokens = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
tokens.extend(label.tokenized(delimiter=delimiter))
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return tokens
#
# Restructuring
#
def separated(self):
"""
Create a separate Label-List for every distinct label-value.
Returns:
dict: A dictionary with distinct label-values as keys. Every value
is a LabelList containing only labels with the same value.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('a', start=7.0, end=10.2),
>>> Label('b', start=10.3, end=14.0)
>>> ])
>>> s = ll.separated()
>>> s['a'].labels
[Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)]
>>> s['b'].labels
[Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)]
"""
separated_lls = collections.defaultdict(LabelList)
for label in self.labels:
separated_lls[label.value].add(label)
for ll in separated_lls.values():
ll.idx = self.idx
return separated_lls
def labels_in_range(self, start, end, fully_included=False):
"""
Return a list of labels that lie within the given range.
Labels that only partially overlap the range are included as well.
Args:
start(float): Start-time in seconds.
end(float): End-time in seconds.
fully_included(bool): If ``True``, only labels fully included
in the range are returned. Otherwise
also overlapping ones are returned.
(default ``False``)
Returns:
list: List of labels in the range.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ll.labels_in_range(6.2, 10.1)
[Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]
"""
if fully_included:
intervals = self.label_tree.envelop(start, end)
else:
intervals = self.label_tree.overlap(start, end)
return [iv.data for iv in intervals]
def ranges(self, yield_ranges_without_labels=False, include_labels=None):
"""
Generate all ranges of the label-list. A range is defined
as a part of the label-list for which the same labels are defined.
Args:
yield_ranges_without_labels(bool): If True also yields ranges for
which no labels are defined.
include_labels(list): If not empty, only the label values in
the list will be considered.
Returns:
generator: A generator which yields one range
(tuple start/end/list-of-labels) at a time.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ranges = ll.ranges()
>>> next(ranges)
(3.2, 4.5, [ < audiomate.annotations.Label at 0x1090527c8 > ])
>>> next(ranges)
(4.5, 5.1, [])
>>> next(ranges)
(5.1, 7.2, [ < audiomate.annotations.label.Label at 0x1090484c8 > ])
"""
tree_copy = self.label_tree.copy()
# Remove labels not included
if include_labels is not None:
for iv in list(tree_copy):
if iv.data.value not in include_labels:
tree_copy.remove(iv)
def reduce(x, y):
x.append(y)
return x
# Split labels when overlapping and merge equal ranges to a list of labels
tree_copy.split_overlaps()
tree_copy.merge_equals(data_reducer=reduce, data_initializer=[])
intervals = sorted(tree_copy)
last_end = intervals[0].begin
# yield range by range
for iv in intervals:
# yield an empty range if necessary
if yield_ranges_without_labels and iv.begin > last_end:
yield (last_end, iv.begin, [])
yield (iv.begin, iv.end, iv.data)
last_end = iv.end
def split(self, cutting_points, shift_times=False, overlap=0.0):
"""
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points
(``x == len(cutting_points) + 1``).
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and
``cutting_points[1]``. And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
where the label-list is split.
shift_times(bool): If True, start and end-time are shifted in
the split label-lists. So the start is relative
to the cutting point and not to the beginning
of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is
subtracted from a start-cutting-point, and added
to an end-cutting-point.
Returns:
list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
>>> res = ll.split([4.6])
>>> len(res)
2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
"""
if len(cutting_points) == 0:
raise ValueError('At least one cutting-point is needed!')
# we have to loop in sorted order
cutting_points = sorted(cutting_points)
splits = []
iv_start = 0.0
for i in range(len(cutting_points) + 1):
if i < len(cutting_points):
iv_end = cutting_points[i]
else:
iv_end = float('inf')
# get all intervals intersecting range
intervals = self.label_tree.overlap(
iv_start - overlap,
iv_end + overlap
)
cp_splits = LabelList(idx=self.idx)
# Extract labels from intervals with updated times
for iv in intervals:
label = copy.deepcopy(iv.data)
label.start = max(0, iv_start - overlap, label.start)
label.end = min(iv_end + overlap, label.end)
if shift_times:
orig_start = max(0, iv_start - overlap)
label.start -= orig_start
label.end -= orig_start
cp_splits.add(label)
splits.append(cp_splits)
iv_start = iv_end
return splits
#
# Convenience Constructors
#
@classmethod
def create_single(cls, value, idx='default'):
"""
Create a label-list with a single label
containing the given value.
"""
return LabelList(idx=idx, labels=[
Label(value=value)
])
@classmethod
def with_label_values(cls, values, idx='default'):
"""
Create a new label-list containing labels with the given values.
All labels will have default start/end values of 0 and ``inf``.
Args:
values(list): List of values(str) that should be created and
appended to the label-list.
idx(str): The idx of the label-list.
Returns:
(LabelList): New label-list.
Example:
>>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters')
>>> ll.idx
'letters'
>>> ll.labels
[
Label('a', 0, inf),
Label('x', 0, inf),
Label('z', 0, inf),
]
"""
ll = LabelList(idx=idx)
for label_value in values:
ll.add(Label(label_value))
return ll
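# Usage sketch (assumes the `Label` class referenced throughout the docstrings
# above is available in this module; values are made up for illustration):
if __name__ == '__main__':
    demo = LabelList(idx='demo', labels=[
        Label('hello', 0.0, 1.5),
        Label('world', 1.6, 3.0),
        Label('hello', 3.1, 4.0),
    ])
    print(dict(demo.label_count()))   # {'hello': 2, 'world': 1}
    print(demo.join())                # 'hello world hello'
    print(sorted(demo.all_tokens()))  # ['hello', 'world']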
|
5831
|
import asyncio
import functools
import time
import weakref
from collections import defaultdict
from typing import AsyncIterable
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
def async_ttl_cache(
ttl: Optional[float] = 300,
cleanup_self: bool = False,
*,
cache: Optional[Dict] = None,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
async def call_or_get_from_cache(cache, async_func, args_for_key, args, kwargs):
# Please note that anything which is put into `key` will be in the
# cache forever, potentially causing memory leaks. The most common
# case is the `self` arg pointing to a huge object. To mitigate that
# we're using `args_for_key`, which is supposed to not contain any huge
# objects.
key = functools._make_key(args_for_key, kwargs, typed=False)
try:
future, last_update = cache[key]
if ttl is not None and time.time() - last_update > ttl:
raise KeyError
except KeyError:
future = asyncio.ensure_future(async_func(*args, **kwargs))
# set the timestamp to +infinity so that we always wait on the in-flight request.
cache[key] = (future, float("Inf"))
try:
value = await future
except Exception:
# Only update the cache if it's the same future we awaited and
# it hasn't already been updated by another coroutine
# Note also that we use get() in case the key was deleted from the
# cache by another coroutine
if cache.get(key) == (future, float("Inf")):
del cache[key]
raise
else:
if cache.get(key) == (future, float("Inf")):
cache[key] = (future, time.time())
return value
if cleanup_self:
instance_caches: Dict = cache if cache is not None else defaultdict(dict)
def on_delete(w):
del instance_caches[w]
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(self, *args, **kwargs):
w = weakref.ref(self, on_delete)
self_cache = instance_caches[w]
return await call_or_get_from_cache(
self_cache, wrapped, args, (self,) + args, kwargs
)
return inner
else:
cache2: Dict = cache if cache is not None else {} # Should be Dict[Any, T] but that doesn't work.
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await call_or_get_from_cache(cache2, wrapped, args, args, kwargs)
return inner
return outer
async def aiter_to_list(aiter: AsyncIterable[T],) -> List[T]:
return [x async for x in aiter]
def async_timeout(
seconds: int = 10,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await asyncio.wait_for(wrapped(*args, **kwargs), timeout=seconds)
return inner
return outer
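# Usage sketch (Python 3.7+, assumes the decorators defined above; the function
# names below are made up for illustration):
if __name__ == "__main__":
    @async_ttl_cache(ttl=60)
    async def fetch_twice(x: int) -> int:
        await asyncio.sleep(0.01)  # stand-in for an expensive call
        return x * 2
    @async_timeout(seconds=2)
    async def slow() -> str:
        await asyncio.sleep(0.1)
        return "done"
    async def demo():
        print(await fetch_twice(21))  # computed: 42
        print(await fetch_twice(21))  # served from the TTL cache: 42
        print(await slow())           # finishes well inside the 2s timeout
    asyncio.run(demo())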
|
5837
|
import os
import sys
from glob import glob
def create_list(images_dir, output_file, img_ext=".jpg"):
ImgList = os.listdir(images_dir)
val_list = []
for img in ImgList:
img, ext = os.path.splitext(img)  # splitext also handles names containing extra dots
val_list.append(img)
with open(os.path.join(images_dir, output_file),'w') as fid:
for line in val_list[:-1]:
fid.write(line + "\n")
fid.write(val_list[-1])
def main():
if len(sys.argv) < 2:
print("Requires images directory")
sys.exit(1)
elif len(sys.argv) < 3:
images_dir = sys.argv[1]
output_file = "image_list.txt"
else:
images_dir = sys.argv[1]
output_file = sys.argv[2]
create_list(images_dir, output_file)
if __name__=="__main__":
main()
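# Usage sketch (hypothetical paths): writes one basename per line into
# image_list.txt (or a custom name) inside the images directory:
#   python create_list.py ./images
#   python create_list.py ./images custom_list.txt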
|
5842
|
import shlex
from os import path
from itertools import imap, ifilter
from urlparse import urljoin
from .css import CSSParser, iter_events
def parse_config_stmt(line, prefix="spritemapper."):
line = line.strip()
if line.startswith(prefix) and "=" in line:
(key, value) = line.split("=", 1)
return (key[len(prefix):].strip(), value.strip())
def iter_config_stmts(data):
return ifilter(None, imap(parse_config_stmt, data.splitlines()))
def iter_css_config(parser):
for ev in iter_events(parser, lexemes=("comment",)):
for v in iter_config_stmts(ev.comment):
yield v
class CSSConfig(object):
def __init__(self, parser=None, base=None, root=None, fname=None):
if fname and root is None:
root = path.dirname(fname)
self.root = root
self._data = dict(base) if base else {}
if parser is not None:
self._data.update(iter_css_config(parser))
def __iter__(self):
# this is mostly so you can go CSSConfig(base=CSSConfig(..))
return self._data.iteritems()
@classmethod
def from_file(cls, fname):
with open(fname, "rb") as fp:
return cls(CSSParser.from_file(fp), fname=fname)
def normpath(self, p):
"""Normalize a possibly relative path *p* to the root."""
return path.normpath(path.join(self.root, p))
def absurl(self, p):
"""Make an absolute reference to *p* from any configured base URL."""
base = self.base_url
if base:
p = urljoin(base, p)
return p
@property
def base_url(self):
return self._data.get("base_url")
@property
def sprite_dirs(self):
if "sprite_dirs" not in self._data:
return
elif self._data.get("output_image"):
raise RuntimeError("cannot have sprite_dirs "
"when output_image is set")
sdirs = shlex.split(self._data["sprite_dirs"])
return map(self.normpath, sdirs)
@property
def output_image(self):
if "output_image" in self._data:
return self.normpath(self._data["output_image"])
@property
def is_mapping_recursive(self):
rv = self._data.get("recursive")
if rv and self._data.get("output_image"):
raise RuntimeError("cannot have recursive spritemapping "
"when output_image is set")
elif rv is None:
return not self._data.get("output_image")
else:
return bool(rv)
@property
def padding(self):
return self._data.get("padding", (1, 1))
@property
def anneal_steps(self):
return int(self._data.get("anneal_steps", 9200))
def get_spritemap_out(self, dn):
"Get output image filename for spritemap directory *dn*."
if "output_image" in self._data:
return self.output_image
return dn + ".png"
def get_spritemap_url(self, fname):
"Get output image URL for spritemap *fname*."
return self.absurl(path.relpath(fname, self.root))
def get_css_out(self, fname):
"Get output image filename for spritemap directory *fname*."
(dirn, base) = path.split(fname)
if "output_css" in self._data:
(base, ext) = path.splitext(base)
names = dict(filename=fname, dirname=dirn,
basename=base, extension=ext)
return self.normpath(self._data["output_css"].format(**names))
else:
return path.join(dirn, "sm_" + base)
def print_config(fname):
from pprint import pprint
from .css import CSSParser
with open(fname, "rb") as fp:
print "%s\n%s\n" % (fname, "=" * len(fname))
pprint(dict(iter_css_config(CSSParser.read_file(fp))))
print
def main():
import sys
for fn in sys.argv[1:]:
print_config(fn)
if __name__ == "__main__":
main()
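# Usage sketch (assumes a CSS file that carries spritemapper.* comment
# directives, e.g. /* spritemapper.output_image = sprites/all.png */):
#   config = CSSConfig.from_file("style.css")
#   config.output_image  # path normalized against the CSS file's directory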
|
5888
|
from mushroom_rl.utils.plots import PlotItemBuffer, DataBuffer
from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited
class RewardPerStep(PlotItemBuffer):
"""
Class that represents a plot for the reward at every step.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Step_Reward"
curves_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, curves_params)
class RewardPerEpisode(PlotItemBuffer):
"""
Class that represents a plot for the accumulated reward per episode.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Episode_Reward"
curves_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, curves_params)
class Actions(PlotItemBufferLimited):
"""
Class that represents a plot for the actions.
"""
def __init__(self, plot_buffers, maxs=None, mins=None):
"""
Constructor.
Args:
plot_buffers (list): list of data buffers to be used;
maxs(list, None): list of max values of each data buffer plotted.
If an element is None, no max line is drawn;
mins(list, None): list of min values of each data buffer plotted.
If an element is None, no min line is drawn.
"""
title = "Actions"
super().__init__(title, plot_buffers, maxs=maxs, mins=mins)
class Observations(PlotItemBufferLimited):
"""
Class that represents a plot for the observations.
"""
def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None):
"""
Constructor.
Args:
plot_buffers (list): list of data buffers to be used;
maxs(list, None): list of max values of each data buffer plotted.
If an element is None, no max line is drawn;
mins(list, None): list of min values of each data buffer plotted.
If an element is None, no min line is drawn.
dotted_limits (list, None): list of booleans. If True, the
corresponding limit is dotted; otherwise, it is printed as a
solid line.
"""
title = "Observations"
super().__init__(title, plot_buffers, maxs=maxs, mins=mins,
dotted_limits=dotted_limits)
class LenOfEpisodeTraining(PlotItemBuffer):
"""
Class that represents a plot for the length of the episode.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Len of Episode"
plot_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, plot_params)
|
5898
|
import json
import csv
import sys
import os
import re
import codecs
import logging
from logging.config import dictConfig
import click
import yaml
from sqlalchemy import create_engine
from jsontableschema_sql import Storage
from smart_open import smart_open
from . import postgres
from . import carto
csv.field_size_limit(sys.maxsize)
def get_logger(logging_config):
try:
with open(logging_config) as file:
config = yaml.load(file)
dictConfig(config)
except:
FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr)
logger = logging.getLogger('the_el')
def exception_handler(type, value, tb):
logger.exception("Uncaught exception: {}".format(str(value)), exc_info=(type, value, tb))
sys.excepthook = exception_handler
return logger
@click.group()
def main():
pass
def get_connection_string(connection_string):
connection_string = os.getenv('CONNECTION_STRING', connection_string)
if connection_string == None:
raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required')
return connection_string
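# Note: the CONNECTION_STRING environment variable takes precedence over the
# --connection-string option, e.g. (hypothetical value):
#   export CONNECTION_STRING=postgresql://user:pass@localhost:5432/mydb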
def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None):
engine = create_engine(connection_string)
storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True)
return engine, storage
def fopen(file, mode='r'):
if file == None:
if mode == 'r':
return sys.stdin
elif mode == 'w':
return sys.stdout
else:
return smart_open(file, mode=mode)
def get_table_schema(table_schema_path):
with fopen(table_schema_path) as file:
contents = file.read()
if not isinstance(contents, str):
contents = contents.decode('utf-8')
return json.loads(contents)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
def describe_table(table_name, connection_string, output_file, db_schema, geometry_support):
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
descriptor = storage.describe(table_name)
with fopen(output_file, mode='w') as file:
json.dump(descriptor, file)
@main.command()
@click.argument('table_name')
@click.argument('table_schema_path')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--indexes-fields')
@click.option('--geometry-support')
@click.option('--if-not-exists', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def create_table(table_name,
table_schema_path,
connection_string,
db_schema,
indexes_fields,
geometry_support,
if_not_exists,
logging_config):
logger = get_logger(logging_config)
table_schema = get_table_schema(table_schema_path)
if indexes_fields != None:
indexes_fields = indexes_fields.split(',')
if re.match(carto.carto_connection_string_regex, connection_string) != None:
load_postgis = geometry_support == 'postgis'
logger.info('{} - Creating table using Carto'.format(table_name))
return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string)
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
logger.info('{} - Creating table using SQLAlchemy'.format(table_name))
storage.create(table_name, table_schema, indexes_fields=indexes_fields)
@main.command()
@click.argument('table_name')
@click.option('--table-schema-path')
@click.option('--connection-string')
@click.option('-f','--input-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--skip-headers', is_flag=True)
@click.option('--indexes-fields')
@click.option('--upsert', is_flag=True)
@click.option('--truncate/--no-truncate', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def write(table_name,
table_schema_path,
connection_string,
input_file,
db_schema,
geometry_support,
from_srid,
skip_headers,
indexes_fields,
upsert,
truncate,
logging_config):
logger = get_logger(logging_config)
table_schema = get_table_schema(table_schema_path)
## TODO: csv settings? use Frictionless Data csv standard?
## TODO: support line delimted json?
with fopen(input_file) as file:
rows = csv.reader(file)
if skip_headers:
next(rows)
if re.match(carto.carto_connection_string_regex, connection_string) != None:
load_postgis = geometry_support == 'postgis'
if indexes_fields != None:
indexes_fields = indexes_fields.split(',')
logger.info('{} - Writing to table using Carto'.format(table_name))
carto.load(logger,
db_schema,
table_name,
load_postgis,
table_schema,
connection_string,
rows,
indexes_fields,
truncate)
else:
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid)
## TODO: truncate? carto does. Makes this idempotent
logger.info('{} - Writing to table using SQLAlchemy'.format(table_name))
if table_schema_path != None:
table_schema = get_table_schema(table_schema_path)
storage.describe(table_name, descriptor=table_schema)
else:
storage.describe(table_name)
if upsert:
postgres.upsert(engine, db_schema, table_name, table_schema, rows)
elif geometry_support == None and engine.dialect.driver == 'psycopg2':
postgres.copy_from(engine, table_name, table_schema, rows)
else:
storage.write(table_name, rows)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--to-srid')
@click.option('--logging-config', default='logging_config.conf')
def read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config):
logger = get_logger(logging_config)
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid)
## TODO: csv settings? use Frictionless Data csv standard?
## TODO: support line delimited json?
with fopen(output_file, mode='w') as file:
writer = csv.writer(file)
descriptor = storage.describe(table_name)
fields = map(lambda x: x['name'], descriptor['fields'])
writer.writerow(fields)
if geometry_support == None and engine.dialect.driver == 'psycopg2':
postgres.copy_to(engine, table_name, file)
else:
for row in storage.iter(table_name):
row_out = []
for field in row:
if isinstance(field, dict) or isinstance(field, list):
field = json.dumps(field)
row_out.append(field)
writer.writerow(row_out)
@main.command()
@click.argument('new_table_name')
@click.argument('old_table_name')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--select-users', help='Users to grant SELECT on updated table')
@click.option('--logging-config', default='logging_config.conf')
def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config):
logger = get_logger(logging_config)
if re.match(carto.carto_connection_string_regex, connection_string) != None:
if select_users != None:
select_users = select_users.split(',')
else:
select_users = []
logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name, old_table_name))
return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string)
connection_string = get_connection_string(connection_string)
engine = create_engine(connection_string)
if engine.dialect.driver == 'psycopg2':
logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name, old_table_name))
conn = engine.raw_connection()
try:
with conn.cursor() as cur:
sql = 'ALTER TABLE "{}" RENAME TO "{}_old";'.format(old_table_name, old_table_name) +\
'ALTER TABLE "{}" RENAME TO "{}";'.format(new_table_name, old_table_name) +\
'DROP TABLE "{}_old";'.format(old_table_name)
cur.execute(sql)
conn.commit()
except:
conn.rollback()
raise
conn.close()
elif engine.dialect.driver == 'cx_oracle':
logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name))
conn = engine.connect()
if select_users != None:
select_users = select_users.split(',')
else:
select_users = []
grants_sql = []
for user in select_users:
grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip()))
# Oracle does not allow table modification within a transaction, so make individual transactions:
sql1 = 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name)
sql2 = 'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name)
sql3 = 'DROP TABLE {}_old'.format(old_table_name)
try:
conn.execute(sql1)
except:
logger.error("Could not rename {} table. Does it exist?".format(old_table_name))
raise
try:
conn.execute(sql2)
except:
logger.error("Could not rename {} table. Does it exist?".format(new_table_name))
rb_sql = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
conn.execute(rb_sql)
raise
try:
conn.execute(sql3)
except:
logger.error("Could not drop {}_old table. Do you have permission?".format(old_table_name))
rb_sql1 = 'DROP TABLE {}'.format(old_table_name)
conn.execute(rb_sql1)
rb_sql2 = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
conn.execute(rb_sql2)
raise
try:
for sql in grants_sql:
conn.execute(sql)
except:
logger.error("Could not grant all permissions to {}.".format(old_table_name))
raise
else:
raise Exception('`{}` not supported by swap_table'.format(engine.dialect.driver))
|
5906
|
import os.path
from .. import *
class TestMixed(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed'), *args, **kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello from c++!\n')
class TestMixedLibrary(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed_library'), *args,
**kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello, library!\n')
@skip_if('fortran' not in test_features, 'skipping fortran tests')
# XXX: This fails on macOS, probably because of a version mismatch somewhere.
@skip_if(env.host_platform.genus == 'darwin', 'fortran on os x is weird')
class TestMixedFortran(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed_fortran'), *args,
**kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello from f77!\n')
|
5907
|
from collections import namedtuple
# Basic example
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0] + p[1])
x, y = p
print(x, y)
print(p.x + p.y)
print(Point(x=11, y=22))
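# A few more namedtuple conveniences from the standard library:
print(p._asdict())        # field/value mapping, e.g. {'x': 11, 'y': 22}
print(p._replace(x=100))  # Point(x=100, y=22)
print(Point._fields)      # ('x', 'y')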
from collections import namedtuple
import csv
f = open("users.csv", "r")
next(f)
reader = csv.reader(f)
student_list = []
for row in reader:
student_list.append(row)
print(row)
print(student_list)
columns = ["user_id", "integration_id", "login_id", "password", "first_name",
"last_name", "full_name", "sortable_name", "short_name",
"email", "status"]
Student = namedtuple('Student', columns)
student_namedtuple_list = []
for row in student_list:
student = Student(*row)
student_namedtuple_list.append(student)
print(student_namedtuple_list[0])
print(student_namedtuple_list[0].full_name)
|
5945
|
import torch
__author__ = 'Andres'
def calc_gradient_penalty_bayes(discriminator, real_data, fake_data, gamma):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = real_data.size()[0]
alpha = torch.rand(batch_size, 1, 1, 1)
alpha = alpha.expand(real_data.size()).to(device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True).to(device)
disc_interpolates = discriminator(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2) - 1) ** 2) * gamma
return gradient_penalty
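# Usage sketch (hypothetical discriminator and tensor shapes, just to show how
# the penalty term is obtained and backpropagated):
if __name__ == '__main__':
    import torch.nn as nn
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    disc = nn.Sequential(nn.Flatten(), nn.Linear(16, 1)).to(device)
    real = torch.randn(4, 1, 4, 4, device=device)
    fake = torch.randn(4, 1, 4, 4, device=device)
    penalty = calc_gradient_penalty_bayes(disc, real, fake, gamma=10.0)
    penalty.backward()  # gradients flow into the discriminator's parameters
    print(penalty.item())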
|
5949
|
import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
"Skipping the test as libnnapi_backend.so was not found")
@unittest.skipIf(TEST_WITH_ASAN, "Unresolved bug with ASAN")
class TestNnapiBackend(TestNNAPI):
def setUp(self):
super().setUp()
# Save default dtype
module = torch.nn.PReLU()
self.default_dtype = module.weight.dtype
# Change dtype to float32 (since a different unit test changed dtype to float64,
# which is not supported by the Android NNAPI delegate)
# Float32 should typically be the default in other files.
torch.set_default_dtype(torch.float32)
# Load nnapi delegate library
torch.ops.load_library(str(lib_path))
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
# Argument input is a single Tensor
self.call_lowering_to_nnapi(traced, args)
# Argument input is a Tensor in a list
self.call_lowering_to_nnapi(traced, [args])
# Test exceptions for incorrect compile specs
def test_compile_spec_sanity(self):
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype)
|
5974
|
import inspect
def get_default_args(func):
"""Get default arguments of a function.
"""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
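# Usage sketch:
if __name__ == "__main__":
    def greet(name, greeting="hello", punctuation="!"):
        return "{}, {}{}".format(greeting, name, punctuation)
    # Only parameters that declare a default value are reported:
    print(get_default_args(greet))  # {'greeting': 'hello', 'punctuation': '!'}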
|
6018
|
from typing import Union
from unittest import mock
import graphene
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.template.defaultfilters import slugify
from graphene.utils.str_converters import to_camel_case
from saleor.core.taxes import zero_money
from saleor.graphql.core.utils import snake_to_camel_case
from saleor.graphql.product.enums import AttributeTypeEnum, AttributeValueType
from saleor.graphql.product.filters import filter_attributes_by_product_types
from saleor.graphql.product.mutations.attributes import validate_value_is_unique
from saleor.graphql.product.types.attributes import resolve_attribute_value_type
from saleor.product import AttributeInputType
from saleor.product.error_codes import ProductErrorCode
from saleor.product.models import (
Attribute,
AttributeProduct,
AttributeValue,
AttributeVariant,
Category,
Collection,
Product,
ProductType,
ProductVariant,
)
from saleor.product.utils.attributes import associate_attribute_values_to_instance
from tests.api.utils import get_graphql_content
def test_validate_value_is_unique(color_attribute):
value = color_attribute.values.first()
# a new value but with existing slug should raise an error
with pytest.raises(ValidationError):
validate_value_is_unique(color_attribute, AttributeValue(slug=value.slug))
# a new value with a new slug should pass
validate_value_is_unique(
color_attribute, AttributeValue(slug="spanish-inquisition")
)
# value that already belongs to the attribute shouldn't be taken into account
validate_value_is_unique(color_attribute, value)
def test_get_single_attribute_by_pk(user_api_client, color_attribute_without_values):
attribute_gql_id = graphene.Node.to_global_id(
"Attribute", color_attribute_without_values.id
)
query = """
query($id: ID!) {
attribute(id: $id) {
id
slug
}
}
"""
content = get_graphql_content(
user_api_client.post_graphql(query, {"id": attribute_gql_id})
)
assert content["data"]["attribute"], "Should have found an attribute"
assert content["data"]["attribute"]["id"] == attribute_gql_id
assert content["data"]["attribute"]["slug"] == color_attribute_without_values.slug
QUERY_ATTRIBUTES = """
query {
attributes(first: 20) {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
"""
def test_attributes_query(user_api_client, product):
attributes = Attribute.objects
query = QUERY_ATTRIBUTES
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert attributes_data
assert len(attributes_data) == attributes.count()
def test_attributes_query_hidden_attribute(user_api_client, product, color_attribute):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.get_visible_to_user(
user_api_client.user
).count()
assert attribute_count == 1
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
def test_attributes_query_hidden_attribute_as_staff_user(
staff_api_client, product, color_attribute, permission_manage_products
):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.all().count()
# The user doesn't have the permission yet to manage products,
# the user shouldn't be able to see the hidden attributes
assert Attribute.objects.get_visible_to_user(staff_api_client.user).count() == 1
# The user should now be able to see the attributes
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES = """
{
products(first: 1) {
edges {
node {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
variants {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
}
}
}
}
}
"""
@pytest.mark.parametrize("is_staff", (False, True))
def test_resolve_attributes_with_hidden(
user_api_client,
product,
color_attribute,
size_attribute,
staff_user,
is_staff,
permission_manage_products,
):
"""Ensure non-staff users don't see hidden attributes, and staff users having
the 'manage product' permission can.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_attribute = color_attribute
variant_attribute = size_attribute
expected_product_attribute_count = product.attributes.count() - 1
expected_variant_attribute_count = variant.attributes.count() - 1
if is_staff:
api_client.user = staff_user
expected_product_attribute_count += 1
expected_variant_attribute_count += 1
staff_user.user_permissions.add(permission_manage_products)
# Hide one product and variant attribute from the storefront
for attribute in (product_attribute, variant_attribute):
attribute.visible_in_storefront = False
attribute.save(update_fields=["visible_in_storefront"])
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
assert len(product["attributes"]) == expected_product_attribute_count
assert len(product["variants"][0]["attributes"]) == expected_variant_attribute_count
def test_resolve_attribute_values(user_api_client, product, staff_user):
"""Ensure the attribute values are properly resolved."""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product_attribute_values = list(
product.attributes.first().values.values_list("slug", flat=True)
)
variant_attribute_values = list(
variant.attributes.first().values.values_list("slug", flat=True)
)
assert len(product_attribute_values) == 1
assert len(variant_attribute_values) == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == len(product_attribute_values)
assert len(variant_attributes) == len(variant_attribute_values)
assert product_attributes[0]["attribute"]["slug"] == "color"
assert product_attributes[0]["values"][0]["slug"] == product_attribute_values[0]
assert product_attributes[0]["value"]["slug"] == product_attribute_values[0]
assert variant_attributes[0]["attribute"]["slug"] == "size"
assert variant_attributes[0]["values"][0]["slug"] == variant_attribute_values[0]
assert variant_attributes[0]["value"]["slug"] == variant_attribute_values[0]
def test_resolve_attribute_values_non_assigned_to_node(
user_api_client, product, staff_user
):
"""Ensure the attribute values are properly resolved when an attribute is part
of the product type but not of the node (product/variant), thus no values should be
resolved.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_type = product.product_type
# Create dummy attributes
unassigned_product_attribute = Attribute.objects.create(name="P", slug="product")
unassigned_variant_attribute = Attribute.objects.create(name="V", slug="variant")
# Create a value for each dummy attribute to ensure they are not returned
# by the product or variant as they are not associated to them
AttributeValue.objects.bulk_create(
[
AttributeValue(slug="a", name="A", attribute=unassigned_product_attribute),
AttributeValue(slug="b", name="B", attribute=unassigned_product_attribute),
]
)
# Assign the dummy attributes to the product type and push them at the top
# through a sort_order=0 as the other attributes have sort_order=null
AttributeProduct.objects.create(
attribute=unassigned_product_attribute, product_type=product_type, sort_order=0
)
AttributeVariant.objects.create(
attribute=unassigned_variant_attribute, product_type=product_type, sort_order=0
)
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == 2, "Non-assigned attr from the PT may be missing"
assert len(variant_attributes) == 2, "Non-assigned attr from the PT may be missing"
assert product_attributes[0]["attribute"]["slug"] == "product"
assert product_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
assert variant_attributes[0]["attribute"]["slug"] == "variant"
assert variant_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
def test_attributes_filter_by_product_type_with_empty_value():
"""Ensure passing an empty or null value is ignored and the queryset is simply
returned without any modification.
"""
qs = Attribute.objects.all()
assert filter_attributes_by_product_types(qs, "...", "") is qs
assert filter_attributes_by_product_types(qs, "...", None) is qs
def test_attributes_filter_by_product_type_with_unsupported_field():
"""Ensure using an unknown field to filter attributes by raises a NotImplemented
exception.
"""
qs = Attribute.objects.all()
with pytest.raises(NotImplementedError) as exc:
filter_attributes_by_product_types(qs, "in_space", "a-value")
assert exc.value.args == ("Filtering by in_space is unsupported",)
def test_attributes_filter_by_non_existing_category_id():
"""Ensure using a non-existing category ID returns an empty query set."""
category_id = graphene.Node.to_global_id("Category", -1)
mocked_qs = mock.MagicMock()
qs = filter_attributes_by_product_types(mocked_qs, "in_category", category_id)
assert qs == mocked_qs.none.return_value
@pytest.mark.parametrize("test_deprecated_filter", [True, False])
@pytest.mark.parametrize("tested_field", ["inCategory", "inCollection"])
def test_attributes_in_collection_query(
user_api_client,
product_type,
category,
collection,
collection_with_products,
test_deprecated_filter,
tested_field,
):
if "Collection" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Collection", collection.pk)
elif "Category" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Category", category.pk)
else:
raise AssertionError(tested_field)
expected_qs = Attribute.objects.filter(
Q(attributeproduct__product_type_id=product_type.pk)
| Q(attributevariant__product_type_id=product_type.pk)
)
# Create another product type and attribute that shouldn't get matched
other_category = Category.objects.create(name="Other Category", slug="other-cat")
other_attribute = Attribute.objects.create(name="Other", slug="other")
other_product_type = ProductType.objects.create(
name="Other type", has_variants=True, is_shipping_required=True
)
other_product_type.product_attributes.add(other_attribute)
other_product = Product.objects.create(
name=f"Another Product",
product_type=other_product_type,
category=other_category,
price=zero_money(),
is_published=True,
)
# Create another collection with products but shouldn't get matched
# as we don't look for this other collection
other_collection = Collection.objects.create(
name="Other Collection",
slug="other-collection",
is_published=True,
description="Description",
)
other_collection.products.add(other_product)
query = """
query($nodeID: ID!) {
attributes(first: 20, %(filter_input)s) {
edges {
node {
id
name
slug
}
}
}
}
"""
if test_deprecated_filter:
query = query % {"filter_input": f"{tested_field}: $nodeID"}
else:
query = query % {"filter_input": "filter: { %s: $nodeID }" % tested_field}
variables = {"nodeID": filtered_by_node_id}
content = get_graphql_content(user_api_client.post_graphql(query, variables))
attributes_data = content["data"]["attributes"]["edges"]
flat_attributes_data = [attr["node"]["slug"] for attr in attributes_data]
expected_flat_attributes_data = list(expected_qs.values_list("slug", flat=True))
assert flat_attributes_data == expected_flat_attributes_data
CREATE_ATTRIBUTES_QUERY = """
mutation createAttribute($name: String!, $values: [AttributeValueCreateInput]) {
attributeCreate(input: {name: $name, values: $values}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_create_attribute_and_attribute_values(
staff_api_client, permission_manage_products
):
query = CREATE_ATTRIBUTES_QUERY
attribute_name = "<NAME>"
name = "Value name"
variables = {"name": attribute_name, "values": [{"name": name}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert not content["data"]["attributeCreate"]["errors"]
data = content["data"]["attributeCreate"]
# Check if the attribute was correctly created
assert data["attribute"]["name"] == attribute_name
assert data["attribute"]["slug"] == slugify(
attribute_name
), "The default slug should be the slugified name"
assert (
data["attribute"]["productTypes"]["edges"] == []
), "The attribute should not have been assigned to a product type"
# Check if the attribute values were correctly created
assert len(data["attribute"]["values"]) == 1
assert data["attribute"]["values"][0]["name"] == name
assert data["attribute"]["values"][0]["slug"] == slugify(name)
@pytest.mark.parametrize(
"input_slug, expected_slug, expected_error",
(
("my-slug", "my-slug", []),
(None, "my-name", []),
(
"",
None,
[{"field": "slug", "message": "The attribute's slug cannot be blank."}],
),
),
)
def test_create_attribute_with_given_slug(
staff_api_client,
permission_manage_products,
input_slug,
expected_slug,
expected_error,
):
staff_api_client.user.user_permissions.add(permission_manage_products)
query = """
mutation createAttribute(
$name: String!, $slug: String) {
attributeCreate(input: {name: $name, slug: $slug}) {
errors {
field
message
}
attribute {
slug
}
}
}
"""
attribute_name = "My Name"
variables = {"name": attribute_name, "slug": input_slug}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))
# Check if the error is as expected: null or something else
assert content["data"]["attributeCreate"]["errors"] == expected_error
# Check if the slug was correctly set if no error was expected
if expected_error is None:
assert content["data"]["attributeCreate"]["attribute"]["slug"] == expected_slug
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_create_attribute_and_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
permission_manage_products,
product_type,
):
query = CREATE_ATTRIBUTES_QUERY
variables = {"name": "Example name", "values": [{"name": name_1}, {"name": name_2}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeCreate"]["errors"]
assert errors
assert errors[0]["field"] == "values"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeCreate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
UPDATE_ATTRIBUTE_QUERY = """
mutation updateAttribute(
$id: ID!, $name: String!, $addValues: [AttributeValueCreateInput]!,
$removeValues: [ID]!) {
attributeUpdate(
id: $id,
input: {
name: $name, addValues: $addValues,
removeValues: $removeValues}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_update_attribute_name(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "<NAME>"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"name": name, "id": node_id, "addValues": [], "removeValues": []}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert data["attribute"]["name"] == name == attribute.name
assert data["attribute"]["productTypes"]["edges"] == []
def test_update_attribute_remove_and_add_values(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "<NAME>"
attribute_value_name = "Red Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
attribute_value_id = attribute.values.first().id
value_id = graphene.Node.to_global_id("AttributeValue", attribute_value_id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [value_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert not data["errors"]
assert data["attribute"]["name"] == name == attribute.name
assert not attribute.values.filter(pk=attribute_value_id).exists()
assert attribute.values.filter(name=attribute_value_name).exists()
def test_update_empty_attribute_and_add_values(
staff_api_client, color_attribute_without_values, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute_without_values
name = "<NAME>"
attribute_value_name = "Yellow Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
attribute.refresh_from_db()
assert attribute.values.count() == 1
assert attribute.values.filter(name=attribute_value_name).exists()
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_update_attribute_and_add_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
color_attribute,
permission_manage_products,
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": "Example name",
"id": node_id,
"removeValues": [],
"addValues": [{"name": name_1}, {"name": name_2}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "addValues"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
def test_update_attribute_and_remove_others_attribute_value(
staff_api_client, color_attribute, size_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
size_attribute = size_attribute.values.first()
attr_id = graphene.Node.to_global_id("AttributeValue", size_attribute.pk)
variables = {
"name": "Example name",
"id": node_id,
"slug": "example-slug",
"addValues": [],
"removeValues": [attr_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "removeValues"
err_msg = "Value %s does not belong to this attribute." % str(size_attribute)
assert errors[0]["message"] == err_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == ProductErrorCode.INVALID.name
def test_delete_attribute(
staff_api_client, color_attribute, permission_manage_products, product_type
):
attribute = color_attribute
query = """
mutation deleteAttribute($id: ID!) {
attributeDelete(id: $id) {
errors {
field
message
}
attribute {
id
}
}
}
"""
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeDelete"]
assert data["attribute"]["id"] == variables["id"]
with pytest.raises(attribute._meta.model.DoesNotExist):
attribute.refresh_from_db()
CREATE_ATTRIBUTE_VALUE_QUERY = """
mutation createAttributeValue(
$attributeId: ID!, $name: String!) {
attributeValueCreate(
attribute: $attributeId, input: {name: $name}) {
productErrors {
field
message
code
}
attribute {
values {
name
}
}
attributeValue {
name
type
slug
}
}
}
"""
def test_create_attribute_value(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
name = "<NAME>"
variables = {"name": name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert not data["productErrors"]
attr_data = data["attributeValue"]
assert attr_data["name"] == name
assert attr_data["slug"] == slugify(name)
assert attr_data["type"] == "STRING"
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_create_attribute_value_not_unique_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
def test_create_attribute_value_capitalized_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name.upper(), "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
UPDATE_ATTRIBUTE_VALUE_QUERY = """
mutation updateChoice(
$id: ID!, $name: String!) {
attributeValueUpdate(
id: $id, input: {name: $name}) {
errors {
field
message
}
attributeValue {
name
slug
}
attribute {
values {
name
}
}
}
}
"""
def test_update_attribute_value(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
name = "Crimson name"
variables = {"name": name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
value.refresh_from_db()
assert data["attributeValue"]["name"] == name == value.name
assert data["attributeValue"]["slug"] == slugify(name)
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_update_attribute_value_name_not_unique(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value.attribute.values.create(
name="<NAME>", slug="example-name", value="#RED"
)
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"name": pink_attribute_value.name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
assert data["errors"]
assert data["errors"][0]["message"]
assert data["errors"][0]["field"] == "name"
def test_delete_attribute_value(
staff_api_client, color_attribute, pink_attribute_value, permission_manage_products
):
value = color_attribute.values.get(name="Red")
query = """
mutation updateChoice($id: ID!) {
attributeValueDelete(id: $id) {
attributeValue {
name
slug
}
}
}
"""
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"id": node_id}
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
with pytest.raises(value._meta.model.DoesNotExist):
value.refresh_from_db()
@pytest.mark.parametrize(
"raw_value, expected_type",
[
("#0000", AttributeValueType.COLOR),
("#FF69B4", AttributeValueType.COLOR),
("rgb(255, 0, 0)", AttributeValueType.COLOR),
("hsl(0, 100%, 50%)", AttributeValueType.COLOR),
("hsla(120, 60%, 70%, 0.3)", AttributeValueType.COLOR),
("rgba(100%, 255, 0, 0)", AttributeValueType.COLOR),
("http://example.com", AttributeValueType.URL),
("https://example.com", AttributeValueType.URL),
("ftp://example.com", AttributeValueType.URL),
("example.com", AttributeValueType.STRING),
("Foo", AttributeValueType.STRING),
("linear-gradient(red, yellow)", AttributeValueType.GRADIENT),
("radial-gradient(#0000, yellow)", AttributeValueType.GRADIENT),
],
)
def test_resolve_attribute_value_type(raw_value, expected_type):
assert resolve_attribute_value_type(raw_value) == expected_type
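# A minimal sketch (not Saleor's actual implementation) of a heuristic consistent
# with the parametrized cases above: CSS colour notations map to COLOR, URL schemes
# to URL, CSS gradients to GRADIENT, and everything else falls back to STRING.
# The helper name below is hypothetical and exists only as a reading aid.
def _example_resolve_value_type(raw_value):
    if raw_value.startswith(("#", "rgb(", "rgba(", "hsl(", "hsla(")):
        return AttributeValueType.COLOR
    if raw_value.startswith(("linear-gradient(", "radial-gradient(")):
        return AttributeValueType.GRADIENT
    if raw_value.startswith(("http://", "https://", "ftp://")):
        return AttributeValueType.URL
    return AttributeValueType.STRING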
def test_resolve_assigned_attribute_without_values(api_client, product_type, product):
"""Ensure the attributes assigned to a product type are resolved even if
    the product doesn't provide any value for them or is not directly associated with them.
"""
# Retrieve the product's variant
variant = product.variants.get()
# Remove all attributes and values from the product and its variant
product.attributesrelated.clear()
variant.attributesrelated.clear()
# Retrieve the product and variant's attributes
products = get_graphql_content(
api_client.post_graphql(
"""
{
products(first: 10) {
edges {
node {
attributes {
attribute {
slug
}
values {
name
}
}
variants {
attributes {
attribute {
slug
}
values {
name
}
}
}
}
}
}
}
"""
)
)["data"]["products"]["edges"]
# Ensure we are only working on one product and variant, the ones we are testing
assert len(products) == 1
assert len(products[0]["node"]["variants"]) == 1
# Retrieve the nodes data
product = products[0]["node"]
variant = product["variants"][0]
    # Ensure the product attribute values are empty
assert len(product["attributes"]) == 1
assert product["attributes"][0]["attribute"]["slug"] == "color"
assert product["attributes"][0]["values"] == []
    # Ensure the variant attribute values are empty
assert variant["attributes"][0]["attribute"]["slug"] == "size"
assert variant["attributes"][0]["values"] == []
ASSIGN_ATTR_QUERY = """
mutation assign($productTypeId: ID!, $operations: [AttributeAssignInput]!) {
attributeAssign(productTypeId: $productTypeId, operations: $operations) {
errors {
field
message
}
productType {
id
productAttributes {
id
}
variantAttributes {
id
}
}
}
}
"""
def test_assign_attributes_to_product_type(
staff_api_client, permission_manage_products, attribute_list
):
product_type = ProductType.objects.create(name="Default Type", has_variants=True)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = []
variables = {"productTypeId": product_type_global_id, "operations": operations}
product_attributes_ids = {attr.pk for attr in attribute_list[:2]}
variant_attributes_ids = {attr.pk for attr in attribute_list[2:]}
for attr_id in product_attributes_ids:
operations.append(
{"type": "PRODUCT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
for attr_id in variant_attributes_ids:
operations.append(
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeAssign"]
assert not content["errors"], "Should have succeeded"
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == len(
product_attributes_ids
)
assert len(content["productType"]["variantAttributes"]) == len(
variant_attributes_ids
)
found_product_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["productAttributes"]
}
found_variant_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["variantAttributes"]
}
assert found_product_attrs_ids == product_attributes_ids
assert found_variant_attrs_ids == variant_attributes_ids
def test_assign_variant_attribute_to_product_type_with_disabled_variants(
staff_api_client,
permission_manage_products,
product_type_without_variant,
color_attribute_without_values,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute as a variant attribute when
the product type doesn't support variants"""
product_type = product_type_without_variant
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Variants are disabled in this product type.",
}
]
def test_assign_variant_attribute_having_unsupported_input_type(
staff_api_client, permission_manage_products, product_type, size_attribute
):
"""The assignAttribute mutation should raise an error when trying
to use an attribute as a variant attribute when
the attribute's input type doesn't support variants"""
attribute = size_attribute
attribute.input_type = AttributeInputType.MULTISELECT
attribute.save(update_fields=["input_type"])
product_type.variant_attributes.clear()
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": (
"Attributes having for input types ['multiselect'] cannot be assigned "
"as variant attributes"
),
}
]
@pytest.mark.parametrize(
"product_type_attribute_type, gql_attribute_type",
(
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.VARIANT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.VARIANT),
),
)
def test_assign_attribute_to_product_type_having_already_that_attribute(
staff_api_client,
permission_manage_products,
color_attribute_without_values,
product_type_attribute_type,
gql_attribute_type,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute already contained in the product type."""
product_type = ProductType.objects.create(name="Type")
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
if product_type_attribute_type == AttributeTypeEnum.PRODUCT:
product_type.product_attributes.add(attribute)
elif product_type_attribute_type == AttributeTypeEnum.VARIANT:
product_type.variant_attributes.add(attribute)
else:
raise ValueError(f"Unknown: {product_type}")
query = ASSIGN_ATTR_QUERY
operations = [
{
"type": gql_attribute_type.value,
"id": graphene.Node.to_global_id("Attribute", attribute.pk),
}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Color (color) have already been assigned to this product type.",
}
]
UNASSIGN_ATTR_QUERY = """
mutation unAssignAttribute(
$productTypeId: ID!, $attributeIds: [ID]!
) {
attributeUnassign(productTypeId: $productTypeId, attributeIds: $attributeIds) {
errors {
field
message
}
productType {
id
variantAttributes {
id
}
productAttributes {
id
}
}
}
}
"""
def test_unassign_attributes_from_product_type(
staff_api_client, permission_manage_products, attribute_list
):
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
variant_attribute, *product_attributes = attribute_list
product_type.product_attributes.add(*product_attributes)
product_type.variant_attributes.add(variant_attribute)
remaining_attribute_global_id = graphene.Node.to_global_id(
"Attribute", product_attributes[1].pk
)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", product_attributes[0].pk)
],
}
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 1
assert len(content["productType"]["variantAttributes"]) == 1
assert (
content["productType"]["productAttributes"][0]["id"]
== remaining_attribute_global_id
)
def test_unassign_attributes_not_in_product_type(
staff_api_client, permission_manage_products, color_attribute_without_values
):
"""The unAssignAttribute mutation should not raise any error when trying
to remove an attribute that is not/no longer in the product type."""
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", color_attribute_without_values.pk)
],
}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 0
assert len(content["productType"]["variantAttributes"]) == 0
def test_retrieve_product_attributes_input_type(
staff_api_client, product, permission_manage_products
):
query = """
{
products(first: 10) {
edges {
node {
attributes {
values {
type
inputType
}
}
}
}
}
}
"""
found_products = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["products"]["edges"]
assert len(found_products) == 1
for gql_attr in found_products[0]["node"]["attributes"]:
assert len(gql_attr["values"]) == 1
assert gql_attr["values"][0]["type"] == "STRING"
assert gql_attr["values"][0]["inputType"] == "DROPDOWN"
@pytest.mark.parametrize(
"attribute, expected_value",
(
("filterable_in_storefront", True),
("filterable_in_dashboard", True),
("visible_in_storefront", True),
("available_in_grid", True),
("value_required", False),
("storefront_search_position", 0),
),
)
def test_retrieving_the_restricted_attributes_restricted(
staff_api_client,
color_attribute,
permission_manage_products,
attribute,
expected_value,
):
"""Checks if the attributes are restricted and if their default value
is the expected one."""
attribute = to_camel_case(attribute)
query = (
"""
{
attributes(first: 10) {
edges {
node {
%s
}
}
}
}
"""
% attribute
)
found_attributes = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["attributes"]["edges"]
assert len(found_attributes) == 1
assert found_attributes[0]["node"][attribute] == expected_value
ATTRIBUTES_RESORT_QUERY = """
mutation ProductTypeReorderAttributes(
$productTypeId: ID!
$moves: [ReorderInput]!
$type: AttributeTypeEnum!
) {
productTypeReorderAttributes(
productTypeId: $productTypeId
moves: $moves
type: $type
) {
productType {
id
variantAttributes {
id
slug
}
productAttributes {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_attributes_within_product_type_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid product type (invalid ID)."""
product_type_id = graphene.Node.to_global_id("ProductType", -1)
attribute_id = graphene.Node.to_global_id("Attribute", -1)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "productTypeId",
"message": f"Couldn't resolve to a product type: {product_type_id}",
}
]
def test_sort_attributes_within_product_type_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder an attribute not associated to the given product type."""
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
@pytest.mark.parametrize(
"attribute_type, relation_field, backref_field",
(
("VARIANT", "variant_attributes", "attributevariant"),
("PRODUCT", "product_attributes", "attributeproduct"),
),
)
def test_sort_attributes_within_product_type(
staff_api_client,
attribute_list,
permission_manage_products,
attribute_type,
relation_field,
backref_field,
):
attributes = attribute_list
assert len(attributes) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
m2m_attributes = getattr(product_type, relation_field)
m2m_attributes.set(attributes)
sort_method = getattr(m2m_attributes, f"{relation_field}_sorted")
attributes = list(sort_method())
assert len(attributes) == 3
variables = {
"type": attribute_type,
"productTypeId": product_type_id,
"moves": [
{
"id": graphene.Node.to_global_id("Attribute", attributes[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("Attribute", attributes[2].pk),
"sortOrder": -1,
},
],
}
expected_order = [attributes[1].pk, attributes[2].pk, attributes[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTES_RESORT_QUERY, variables)
)["data"]["productTypeReorderAttributes"]
assert not content["errors"]
assert (
content["productType"]["id"] == product_type_id
), "Did not return the correct product type"
gql_attributes = content["productType"][snake_to_camel_case(relation_field)]
assert len(gql_attributes) == len(expected_order)
for attr, expected_pk in zip(gql_attributes, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "Attribute"
assert int(gql_attr_id) == expected_pk
ATTRIBUTE_VALUES_RESORT_QUERY = """
mutation attributeReorderValues($attributeId: ID!, $moves: [ReorderInput]!) {
attributeReorderValues(attributeId: $attributeId, moves: $moves) {
attribute {
id
values {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_values_within_attribute_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid attribute (invalid ID)."""
attribute_id = graphene.Node.to_global_id("Attribute", -1)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "attributeId",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
def test_sort_values_within_attribute_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder a value not associated to the given attribute."""
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"type": "VARIANT",
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute value: {value_id}",
}
]
def test_sort_values_within_attribute(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
AttributeValue.objects.create(attribute=attribute, name="Green", slug="green")
values = list(attribute.values.all())
assert len(values) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
m2m_values = attribute.values
m2m_values.set(values)
assert values == sorted(
values, key=lambda o: o.sort_order if o.sort_order is not None else o.pk
), "The values are not properly ordered"
variables = {
"attributeId": attribute_id,
"moves": [
{
"id": graphene.Node.to_global_id("AttributeValue", values[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("AttributeValue", values[2].pk),
"sortOrder": -1,
},
],
}
expected_order = [values[1].pk, values[2].pk, values[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTE_VALUES_RESORT_QUERY, variables)
)["data"]["attributeReorderValues"]
assert not content["errors"]
assert content["attribute"]["id"] == attribute_id
gql_values = content["attribute"]["values"]
assert len(gql_values) == len(expected_order)
actual_order = []
for attr, expected_pk in zip(gql_values, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "AttributeValue"
actual_order.append(int(gql_attr_id))
assert actual_order == expected_order
ATTRIBUTES_FILTER_QUERY = """
query($filters: AttributeFilterInput!) {
attributes(first: 10, filter: $filters) {
edges {
node {
name
slug
}
}
}
}
"""
def test_search_attributes(api_client, color_attribute, size_attribute):
variables = {"filters": {"search": "color"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "color"
def test_filter_attributes_if_filterable_in_dashboard(
api_client, color_attribute, size_attribute
):
color_attribute.filterable_in_dashboard = False
color_attribute.save(update_fields=["filterable_in_dashboard"])
variables = {"filters": {"filterableInDashboard": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_if_available_in_grid(
api_client, color_attribute, size_attribute
):
color_attribute.available_in_grid = False
color_attribute.save(update_fields=["available_in_grid"])
variables = {"filters": {"availableInGrid": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_by_global_id_list(api_client, attribute_list):
global_ids = [
graphene.Node.to_global_id("Attribute", attribute.pk)
for attribute in attribute_list[:2]
]
variables = {"filters": {"ids": global_ids}}
expected_slugs = sorted([attribute_list[0].slug, attribute_list[1].slug])
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
received_slugs = sorted(
[attributes[0]["node"]["slug"], attributes[1]["node"]["slug"]]
)
assert received_slugs == expected_slugs
ATTRIBUTES_SORT_QUERY = """
query($sortBy: AttributeSortingInput) {
attributes(first: 10, sortBy: $sortBy) {
edges {
node {
slug
}
}
}
}
"""
def test_sort_attributes_by_slug(api_client):
Attribute.objects.bulk_create(
[
Attribute(name="MyAttribute", slug="b"),
Attribute(name="MyAttribute", slug="a"),
]
)
variables = {"sortBy": {"field": "SLUG", "direction": "ASC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "a"
assert attributes[1]["node"]["slug"] == "b"
@pytest.mark.parametrize(
"sort_field, m2m_model",
(
("DASHBOARD_VARIANT_POSITION", AttributeVariant),
("DASHBOARD_PRODUCT_POSITION", AttributeProduct),
),
)
def test_sort_attributes_by_position_in_product_type(
api_client,
color_attribute,
size_attribute,
sort_field: str,
m2m_model: Union[AttributeVariant, AttributeProduct],
):
"""Sorts attributes for dashboard custom ordering inside a given product type."""
product_type = ProductType.objects.create(name="My Product Type")
m2m_model.objects.create(
product_type=product_type, attribute=color_attribute, sort_order=0
)
m2m_model.objects.create(
product_type=product_type, attribute=size_attribute, sort_order=1
)
variables = {"sortBy": {"field": sort_field, "direction": "DESC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "size"
assert attributes[1]["node"]["slug"] == "color"
def test_sort_attributes_by_default_sorting(api_client):
"""Don't provide any sorting, this should sort by name by default."""
Attribute.objects.bulk_create(
[Attribute(name="A", slug="b"), Attribute(name="B", slug="a")]
)
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, {})
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "b"
assert attributes[1]["node"]["slug"] == "a"
@pytest.mark.parametrize("is_variant", (True, False))
def test_attributes_of_products_are_sorted(
staff_api_client, product, color_attribute, is_variant
):
"""Ensures the attributes of products and variants are sorted."""
variant = product.variants.first()
if is_variant:
query = """
query($id: ID!) {
productVariant(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
else:
query = """
query($id: ID!) {
product(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
    # Create a dummy attribute with a higher ID.
    # This guarantees it would always come last when sorted by ID, so we can be
    # sure the query actually orders by sort order rather than by ID.
other_attribute = Attribute.objects.create(name="Other", slug="other")
# Add the attribute to the product type
if is_variant:
product.product_type.variant_attributes.set([color_attribute, other_attribute])
else:
product.product_type.product_attributes.set([color_attribute, other_attribute])
# Retrieve the M2M object for the attribute vs the product type
if is_variant:
m2m_rel_other_attr = other_attribute.attributevariant.last()
else:
m2m_rel_other_attr = other_attribute.attributeproduct.last()
    # Push the last attribute to the top and leave the others as None
m2m_rel_other_attr.sort_order = 0
m2m_rel_other_attr.save(update_fields=["sort_order"])
# Assign attributes to the product
node = variant if is_variant else product # type: Union[Product, ProductVariant]
node.attributesrelated.clear()
associate_attribute_values_to_instance(
node, color_attribute, color_attribute.values.first()
)
    # Sort the database attributes by their sort order, falling back to ID when it is None
expected_order = [other_attribute.pk, color_attribute.pk]
# Make the node ID
if is_variant:
node_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
else:
node_id = graphene.Node.to_global_id("Product", product.pk)
# Retrieve the attributes
data = get_graphql_content(staff_api_client.post_graphql(query, {"id": node_id}))[
"data"
]
attributes = data["productVariant" if is_variant else "product"]["attributes"]
actual_order = [
int(graphene.Node.from_global_id(attr["attribute"]["id"])[1])
for attr in attributes
]
# Compare the received data against our expectations
assert actual_order == expected_order
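# A minimal sketch of an ordering key consistent with what the test above asserts
# (not Saleor's actual queryset ordering): attributes with an explicit sort_order
# come first in ascending order, and attributes whose sort_order is None fall back
# to the end, ordered by primary key. `_AttrStub` and the helper are hypothetical.
import collections

_AttrStub = collections.namedtuple("_AttrStub", "pk sort_order")


def _example_expected_attribute_order(attrs):
    ordered = sorted(
        attrs,
        key=lambda a: (
            a.sort_order is None,
            a.sort_order if a.sort_order is not None else a.pk,
        ),
    )
    return [a.pk for a in ordered]


# e.g. _example_expected_attribute_order(
#     [_AttrStub(pk=1, sort_order=None), _AttrStub(pk=2, sort_order=0)]
# ) == [2, 1], matching `expected_order` above.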
|
6043
|
import copy
from typing import Callable, Dict, List, Optional
import torch
import torch.nn as nn
import torch.optim as optim
from ai_traineree import DEVICE
from ai_traineree.agents import AgentBase
from ai_traineree.agents.agent_utils import soft_update
from ai_traineree.buffers import NStepBuffer, PERBuffer
from ai_traineree.buffers.buffer_factory import BufferFactory
from ai_traineree.loggers import DataLogger
from ai_traineree.networks.heads import RainbowNet
from ai_traineree.types import ActionType, AgentState, BufferState, DoneType, NetworkState, ObsType, RewardType
from ai_traineree.types.dataspace import DataSpace
from ai_traineree.utils import to_numbers_seq, to_tensor
class RainbowAgent(AgentBase):
"""Rainbow agent as described in [1].
    Rainbow is a DQN agent with a number of improvements that were suggested before 2017.
    As the authors mention, the list isn't exhaustive, but the changes touch relatively
    separate areas, so combining them makes sense. These improvements are:
* Priority Experience Replay
* Multi-step
* Double Q net
* Dueling nets
* NoisyNet
* CategoricalNet for Q estimate
Consider this class as a particular version of the DQN agent.
[1] "Rainbow: Combining Improvements in Deep Reinforcement Learning" by Hessel et al. (DeepMind team)
https://arxiv.org/abs/1710.02298
"""
model = "Rainbow"
def __init__(
self,
obs_space: DataSpace,
action_space: DataSpace,
state_transform: Optional[Callable]=None,
reward_transform: Optional[Callable]=None,
**kwargs
):
"""
        A wrapper over the DQN, so the majority of the logic lives in the DQNAgent.
        Special treatment is required because the Rainbow agent uses categorical nets,
        which operate on probability distributions; each action value is estimated
        from such a distribution.
Parameters:
obs_space (DataSpace): Dataspace describing the input.
action_space (DataSpace): Dataspace describing the output.
state_transform (optional func):
reward_transform (optional func):
Keyword parameters:
pre_network_fn (function that takes input_shape and returns network):
Used to preprocess state before it is used in the value- and advantage-function in the dueling nets.
hidden_layers (tuple of ints): Shape of the hidden layers in fully connected network. Default: (100, 100).
            lr (float): Learning rate value. Default: 3e-4.
gamma (float): Discount factor. Default: 0.99.
tau (float): Soft-copy factor. Default: 0.002.
update_freq (int): Number of steps between each learning step. Default 1.
batch_size (int): Number of samples to use at each learning step. Default: 80.
buffer_size (int): Number of most recent samples to keep in memory for learning. Default: 1e5.
warm_up (int): Number of samples to observe before starting any learning step. Default: 0.
number_updates (int): How many times to use learning step in the learning phase. Default: 1.
max_grad_norm (float): Maximum norm of the gradient used in learning. Default: 10.
using_double_q (bool): Whether to use Double Q Learning network. Default: True.
n_steps (int): Number of lookahead steps when estimating reward. See :ref:`NStepBuffer`. Default: 3.
v_min (float): Lower bound for distributional value V. Default: -10.
v_max (float): Upper bound for distributional value V. Default: 10.
num_atoms (int): Number of atoms (discrete states) in the value V distribution. Default: 21.
"""
super().__init__(**kwargs)
self.device = self._register_param(kwargs, "device", DEVICE, update=True)
self.obs_space = obs_space
self.action_space = action_space
self._config['obs_space'] = self.obs_space
self._config['action_space'] = self.action_space
self.action_size = action_space.to_feature()
self.lr = float(self._register_param(kwargs, 'lr', 3e-4))
self.gamma = float(self._register_param(kwargs, 'gamma', 0.99))
self.tau = float(self._register_param(kwargs, 'tau', 0.002))
self.update_freq = int(self._register_param(kwargs, 'update_freq', 1))
self.batch_size = int(self._register_param(kwargs, 'batch_size', 80, update=True))
self.buffer_size = int(self._register_param(kwargs, 'buffer_size', int(1e5), update=True))
self.warm_up = int(self._register_param(kwargs, 'warm_up', 0))
self.number_updates = int(self._register_param(kwargs, 'number_updates', 1))
self.max_grad_norm = float(self._register_param(kwargs, 'max_grad_norm', 10))
self.iteration: int = 0
self.using_double_q = bool(self._register_param(kwargs, "using_double_q", True))
self.state_transform = state_transform if state_transform is not None else lambda x: x
self.reward_transform = reward_transform if reward_transform is not None else lambda x: x
v_min = float(self._register_param(kwargs, "v_min", -10))
v_max = float(self._register_param(kwargs, "v_max", 10))
self.num_atoms = int(self._register_param(kwargs, "num_atoms", 21, drop=True))
self.z_atoms = torch.linspace(v_min, v_max, self.num_atoms, device=self.device)
self.z_delta = self.z_atoms[1] - self.z_atoms[0]
self.buffer = PERBuffer(**kwargs)
self.__batch_indices = torch.arange(self.batch_size, device=self.device)
self.n_steps = int(self._register_param(kwargs, "n_steps", 3))
self.n_buffer = NStepBuffer(n_steps=self.n_steps, gamma=self.gamma)
        # Note that in case a pre_network is provided, e.g. a shared net that extracts pixel values,
        # it should be explicitly passed in kwargs
kwargs["hidden_layers"] = to_numbers_seq(self._register_param(kwargs, "hidden_layers", (100, 100)))
self.net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.target_net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
self.dist_probs = None
self._loss = float('nan')
@property
def loss(self):
return {'loss': self._loss}
@loss.setter
def loss(self, value):
if isinstance(value, dict):
value = value['loss']
self._loss = value
def step(self, obs: ObsType, action: ActionType, reward: RewardType, next_obs: ObsType, done: DoneType) -> None:
"""Letting the agent to take a step.
On some steps the agent will initiate learning step. This is dependent on
the `update_freq` value.
Parameters:
obs (ObservationType): Observation.
action (int): Discrete action associated with observation.
reward (float): Reward obtained for taking action at state.
next_obs (ObservationType): Observation in a state where the action took.
done: (bool) Whether in terminal (end of episode) state.
"""
assert isinstance(action, int), "Rainbow expects discrete action (int)"
self.iteration += 1
t_obs = to_tensor(self.state_transform(obs)).float().to("cpu")
t_next_obs = to_tensor(self.state_transform(next_obs)).float().to("cpu")
reward = self.reward_transform(reward)
# Delay adding to buffer to account for n_steps (particularly the reward)
self.n_buffer.add(
state=t_obs.numpy(), action=[int(action)], reward=[reward], done=[done], next_state=t_next_obs.numpy()
)
if not self.n_buffer.available:
return
self.buffer.add(**self.n_buffer.get().get_dict())
if self.iteration < self.warm_up:
return
if len(self.buffer) >= self.batch_size and (self.iteration % self.update_freq) == 0:
for _ in range(self.number_updates):
self.learn(self.buffer.sample())
# Update networks only once - sync local & target
soft_update(self.target_net, self.net, self.tau)
def act(self, obs: ObsType, eps: float = 0.) -> int:
"""
Returns actions for given state as per current policy.
Parameters:
            obs: Current observation from the environment.
            eps: Epsilon value used in the epsilon-greedy policy.
"""
# Epsilon-greedy action selection
if self._rng.random() < eps:
# TODO: Update with action_space.sample() once implemented
assert len(self.action_space.shape) == 1, "Only 1D is supported right now"
return self._rng.randint(self.action_space.low, self.action_space.high)
t_obs = to_tensor(self.state_transform(obs)).float().unsqueeze(0).to(self.device)
self.dist_probs = self.net.act(t_obs)
q_values = (self.dist_probs * self.z_atoms).sum(-1)
return int(q_values.argmax(-1)) # Action maximizes state-action value Q(s, a)
def learn(self, experiences: Dict[str, List]) -> None:
"""
Parameters:
experiences: Contains all experiences for the agent. Typically sampled from the memory buffer.
Five keys are expected, i.e. `state`, `action`, `reward`, `next_state`, `done`.
                Each key contains an array, and all arrays have to have the same length.
"""
rewards = to_tensor(experiences['reward']).float().to(self.device)
dones = to_tensor(experiences['done']).type(torch.int).to(self.device)
states = to_tensor(experiences['state']).float().to(self.device)
next_states = to_tensor(experiences['next_state']).float().to(self.device)
actions = to_tensor(experiences['action']).type(torch.long).to(self.device)
assert rewards.shape == dones.shape == (self.batch_size, 1)
assert states.shape == next_states.shape == (self.batch_size,) + self.obs_space.shape
assert actions.shape == (self.batch_size, 1) # Discrete domain
with torch.no_grad():
prob_next = self.target_net.act(next_states)
q_next = (prob_next * self.z_atoms).sum(-1) * self.z_delta
if self.using_double_q:
duel_prob_next = self.net.act(next_states)
a_next = torch.argmax((duel_prob_next * self.z_atoms).sum(-1), dim=-1)
else:
a_next = torch.argmax(q_next, dim=-1)
prob_next = prob_next[self.__batch_indices, a_next, :]
m = self.net.dist_projection(rewards, 1 - dones, self.gamma ** self.n_steps, prob_next)
assert m.shape == (self.batch_size, self.num_atoms)
log_prob = self.net(states, log_prob=True)
assert log_prob.shape == (self.batch_size,) + self.action_size + (self.num_atoms,)
log_prob = log_prob[self.__batch_indices, actions.squeeze(), :]
assert log_prob.shape == m.shape == (self.batch_size, self.num_atoms)
        # Cross-entropy error per sample; the loss is the batch mean
error = -torch.sum(m * log_prob, 1)
assert error.shape == (self.batch_size,)
loss = error.mean()
assert loss >= 0
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)
self.optimizer.step()
self._loss = float(loss.item())
if hasattr(self.buffer, 'priority_update'):
assert (~torch.isnan(error)).any()
self.buffer.priority_update(experiences['index'], error.detach().cpu().numpy())
# Update networks - sync local & target
soft_update(self.target_net, self.net, self.tau)
def state_dict(self) -> Dict[str, dict]:
"""Returns agent's state dictionary.
Returns:
            State dictionary for the internal networks.
"""
return {"net": self.net.state_dict(), "target_net": self.target_net.state_dict()}
def log_metrics(self, data_logger: DataLogger, step: int, full_log: bool=False):
data_logger.log_value("loss/agent", self._loss, step)
if full_log and self.dist_probs is not None:
assert len(self.action_space.shape) == 1, "Only 1D actions currently supported"
action_size = self.action_size[0]
for action_idx in range(action_size):
dist = self.dist_probs[0, action_idx]
data_logger.log_value(f'dist/expected_{action_idx}', (dist*self.z_atoms).sum().item(), step)
data_logger.add_histogram(
f'dist/Q_{action_idx}', min=self.z_atoms[0], max=self.z_atoms[-1], num=len(self.z_atoms),
sum=dist.sum(), sum_squares=dist.pow(2).sum(), bucket_limits=self.z_atoms+self.z_delta,
bucket_counts=dist, global_step=step
)
        # This method, `log_metrics`, isn't executed on every iteration, but plotting the
        # weights can still be quite costly, so it is only done on full logs. Tread wisely.
if full_log:
for idx, layer in enumerate(self.net.value_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"value_net/layer_weights_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"value_net/layer_bias_{idx}", layer.bias.cpu(), step)
for idx, layer in enumerate(self.net.advantage_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"advantage_net/layer_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"advantage_net/layer_bias_{idx}", layer.bias.cpu(), step)
def get_state(self) -> AgentState:
"""Provides agent's internal state."""
return AgentState(
model=self.model,
obs_space=self.obs_space,
action_space=self.action_space,
config=self._config,
buffer=copy.deepcopy(self.buffer.get_state()),
network=copy.deepcopy(self.get_network_state()),
)
def get_network_state(self) -> NetworkState:
return NetworkState(net=dict(net=self.net.state_dict(), target_net=self.target_net.state_dict()))
@staticmethod
def from_state(state: AgentState) -> AgentBase:
config = copy.copy(state.config)
config.update({'obs_space': state.obs_space, 'action_space': state.action_space})
agent = RainbowAgent(**config)
if state.network is not None:
agent.set_network(state.network)
if state.buffer is not None:
agent.set_buffer(state.buffer)
return agent
def set_network(self, network_state: NetworkState) -> None:
self.net.load_state_dict(network_state.net['net'])
self.target_net.load_state_dict(network_state.net['target_net'])
def set_buffer(self, buffer_state: BufferState) -> None:
self.buffer = BufferFactory.from_state(buffer_state)
def save_state(self, path: str) -> None:
"""Saves agent's state into a file.
Parameters:
path: String path where to write the state.
"""
agent_state = self.get_state()
torch.save(agent_state, path)
def load_state(self, path: str) -> None:
"""Loads state from a file under provided path.
Parameters:
path: String path indicating where the state is stored.
"""
agent_state = torch.load(path)
self._config = agent_state.get('config', {})
self.__dict__.update(**self._config)
self.net.load_state_dict(agent_state['net'])
self.target_net.load_state_dict(agent_state['target_net'])
def save_buffer(self, path: str) -> None:
"""Saves data from the buffer into a file under provided path.
Parameters:
path: String path where to write the buffer.
"""
import json
dump = self.buffer.dump_buffer(serialize=True)
with open(path, 'w') as f:
json.dump(dump, f)
def load_buffer(self, path: str) -> None:
"""Loads data into the buffer from provided file path.
Parameters:
path: String path indicating where the buffer is stored.
"""
import json
with open(path, 'r') as f:
buffer_dump = json.load(f)
self.buffer.load_buffer(buffer_dump)
def __eq__(self, o: object) -> bool:
return super().__eq__(o) \
and isinstance(o, type(self)) \
and self._config == o._config \
and self.buffer == o.buffer \
and self.get_network_state() == o.get_network_state()
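# A minimal sketch (under stated assumptions, not ai_traineree's actual code) of two
# building blocks referenced above: the n-step discounted return that NStepBuffer is
# expected to accumulate, and the Polyak/soft update presumably applied by
# `soft_update(target, source, tau)`. The helper names are hypothetical.
def _example_n_step_return(rewards, gamma):
    """Return sum_k gamma**k * r_k over the collected n rewards."""
    return sum((gamma ** k) * r for k, r in enumerate(rewards))


def _example_soft_update(target_params, source_params, tau):
    """Blend parameters in place: target <- tau * source + (1 - tau) * target."""
    with torch.no_grad():
        for t, s in zip(target_params, source_params):
            t.copy_(tau * s + (1.0 - tau) * t)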
|
6047
|
import pytest
from pathlib import Path
from blendtorch import btt
BLENDDIR = Path(__file__).parent/'blender'
class MyEnv(btt.env.OpenAIRemoteEnv):
def __init__(self, background=True, **kwargs):
super().__init__(version='1.0.0')
self.launch(scene=BLENDDIR/'env.blend', script=BLENDDIR /
'env.blend.py', background=background, **kwargs)
        # For Blender 2.9, if we pass scene='', the tests below fail since
        # _env_post_step() is not called. It's currently unclear why this happens.
def _run_remote_env(background):
env = MyEnv(background=background)
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2 # 1 is already set by reset()
obs, reward, done, info = env.step(0.6)
assert obs == pytest.approx(0.6)
assert reward == 1.
assert not done
assert info['count'] == 3
for _ in range(8):
obs, reward, done, info = env.step(0.6)
assert done
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2
env.close()
@pytest.mark.background
def test_remote_env():
_run_remote_env(background=True)
def test_remote_env_ui():
_run_remote_env(background=False)
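# A minimal sketch of a generic rollout against the gym-style interface exercised
# above (reset()/step() returning obs, reward, done, info). The helper name and the
# `policy` callable are hypothetical and not part of blendtorch.
def _example_rollout(env, policy, max_steps=10):
    obs = env.reset()
    total_reward = 0.
    for _ in range(max_steps):
        obs, reward, done, info = env.step(policy(obs))
        total_reward += reward
        if done:
            break
    return total_reward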
|
6078
|
import numpy as np
import scipy as sp
import scipy.sparse.linalg as splinalg
def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1):
"""
DESCRIPTION
-----------
Computes the eigenvector that corresponds to the second smallest eigenvalue
    of the normalized Laplacian matrix; the solution can then be rounded with a sweep cut.
PARAMETERS (mandatory)
----------------------
g: graph object
PARAMETERS (optional)
---------------------
dim: positive, int
default == 1
The number of eigenvectors or dimensions to compute.
tol_eigs: positive float, double
default == 1.0e-6
Tolerance for computation of the eigenvector that corresponds to
the second smallest eigenvalue of the normalized Laplacian matrix.
normalize: bool,
default == True
True if we should return the eigenvectors of the generalized
eigenvalue problem associated with the normalized Laplacian.
This should be on unless you know what you are doing.
    RETURNS
    -------
    p: Eigenvector or eigenvector matrix that
       corresponds to the second smallest eigenvalue of the
       normalized Laplacian matrix, plus eigenvectors for larger eigenvalues if dim > 1.
"""
n = g.adjacency_matrix.shape[0]
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, n, n)
L = sp.sparse.identity(n) - D_sqrt_neg.dot((g.adjacency_matrix.dot(D_sqrt_neg)))
emb_eig_val, p = splinalg.eigsh(L, which='SM', k=1+dim, tol = tol_eigs)
F = np.real(p[:,1:])
if normalize:
F *= g.dn_sqrt[:,np.newaxis]
return F, emb_eig_val
"""
Random walks and local cuts in graphs, Chung, LAA 2007
We just form the sub-matrix of the Laplacian and use the eigenvector there.
"""
def eig2nL_subgraph(g, ref_nodes, tol_eigs = 1.0e-6, normalize: bool = True):
A_sub = g.adjacency_matrix.tocsr()[ref_nodes, :].tocsc()[:, ref_nodes]
nref = len(ref_nodes)
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt[ref_nodes].transpose(), 0, nref, nref)
L_sub = sp.sparse.identity(nref) - D_sqrt_neg.dot((A_sub.dot(D_sqrt_neg)))
emb_eig_val, emb_eig = splinalg.eigsh(L_sub, which='SM', k=1, tol=tol_eigs)
emb_eig *= -1 if max(emb_eig) < 0 else 1
f = emb_eig[:,0]
if normalize:
f *= g.dn_sqrt[ref_nodes]
return ((ref_nodes,f), emb_eig_val)
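# A minimal standalone sketch of the computation above, assuming the usual
# definitions (A: sparse adjacency matrix, d: degree vector,
# L = I - D^{-1/2} A D^{-1/2}) and a connected graph with no isolated vertices;
# it does not rely on the `g` graph object expected by eig2_nL.
def _example_fiedler_vector(A, tol_eigs=1.0e-6):
    n = A.shape[0]
    d = np.ravel(np.asarray(A.sum(axis=1)))
    d_sqrt_neg = sp.sparse.spdiags(1.0 / np.sqrt(d), 0, n, n)
    L = sp.sparse.identity(n) - d_sqrt_neg.dot(A.dot(d_sqrt_neg))
    eig_vals, eig_vecs = splinalg.eigsh(L, which='SM', k=2, tol=tol_eigs)
    # Column 1 is the eigenvector for the second smallest eigenvalue.
    return eig_vecs[:, 1]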
|
6092
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PGDModel(nn.Module):
"""
code adapted from
https://github.com/karandwivedi42/adversarial/blob/master/main.py
"""
def __init__(self, basic_net, config):
super(PGDModel, self).__init__()
self.basic_net = basic_net
self.rand = config['random_start']
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
assert config['loss_func'] == 'xent', 'Only xent supported for now.'
def forward(self, inputs, targets, attack=False):
if not attack:
return self.basic_net(inputs)
x = inputs.clone()
if self.rand:
x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
for _ in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
logits = self.basic_net(x)
loss = F.cross_entropy(logits, targets, reduction='sum')
grad = torch.autograd.grad(loss, x)[0]
x = x.detach() + self.step_size * torch.sign(grad.detach())
x = torch.min(torch.max(x, inputs.detach() - self.epsilon),
inputs.detach() + self.epsilon)
x = torch.clamp(x, 0, 1)
return self.basic_net(x)
class PGDL2Model(nn.Module):
"""
code adapted from
https://github.com/karandwivedi42/adversarial/blob/master/main.py
"""
def __init__(self, basic_net, config):
super(PGDL2Model, self).__init__()
self.basic_net = basic_net
self.epsilon = config['epsilon']
self.rand = config['random_start']
self.step_size = config['step_size']
self.num_steps = config['num_steps']
assert config['loss_func'] == 'xent', 'Only xent supported for now.'
def forward(self, inputs, targets, attack=False):
if not attack:
return self.basic_net(inputs)
x = inputs.clone()
if self.rand:
x = x + torch.zeros_like(x).normal_(0, self.step_size)
for _ in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
logits = self.basic_net(x)
loss = F.cross_entropy(logits, targets, reduction='sum')
grad = torch.autograd.grad(loss, x)[0].detach()
grad_norm = grad.view(x.size(0), -1).norm(2, 1)
delta = self.step_size * grad / grad_norm.view(x.size(0), 1, 1, 1)
x = x.detach() + delta
diff = (x - inputs).view(x.size(0), -1).renorm(2, 0, self.epsilon)
x = diff.view(x.size()) + inputs
x.clamp_(0, 1)
return self.basic_net(x)
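# A minimal usage sketch for the wrappers above. The config keys mirror what
# PGDModel reads in __init__; the tiny classifier, batch shapes and step sizes are
# made up for illustration.
if __name__ == "__main__":
    net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
    config = {
        'random_start': True,
        'step_size': 2.0 / 255,
        'epsilon': 8.0 / 255,
        'num_steps': 10,
        'loss_func': 'xent',
    }
    attack_model = PGDModel(net, config)
    images = torch.rand(4, 3, 32, 32)
    labels = torch.randint(0, 10, (4,))
    adv_logits = attack_model(images, labels, attack=True)
    print(adv_logits.shape)  # torch.Size([4, 10])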
|
6119
|
import numpy as np
def rot_to_angle(rot):
return np.arccos(0.5*np.trace(rot)-0.5)
def rot_to_heading(rot):
    # This function calculates the heading angle of the rotation matrix w.r.t. the y-axis
    new_rot = rot[0:3:2, 0:3:2]  # remove the middle row and column corresponding to the y-axis
new_rot = new_rot/np.linalg.det(new_rot)
return np.arctan2(new_rot[1, 0], new_rot[0, 0])
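# A minimal sanity-check sketch: for a rotation by `theta` about the y-axis,
# rot_to_angle should return |theta| and rot_to_heading should recover theta up to
# sign (the sign depends on the axis convention; with the matrix below it is -theta).
if __name__ == "__main__":
    theta = 0.3
    c, s = np.cos(theta), np.sin(theta)
    rot_y = np.array([
        [c, 0.0, s],
        [0.0, 1.0, 0.0],
        [-s, 0.0, c],
    ])
    print(rot_to_angle(rot_y))    # ~0.3
    print(rot_to_heading(rot_y))  # ~-0.3 with this convention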
|
6121
|
import logging
logger = logging.getLogger(__name__)
import random
import chainercv
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
from pose.hand_dataset.geometry_utils import normalize_joint_zyx
from pose.hand_dataset.image_utils import normalize_depth
# Decimal Code (R,G,B)
BASE_COLOR = {
"RED": (255, 0, 0),
"GREEN": (0, 255, 0),
"BLUE": (0, 0, 255),
"YELLOW": (255, 255, 0),
"CYAN": (0, 255, 255),
"MAGENTA": (255, 0, 255),
}
def vis_image(img, ax=None):
"""
extend chainercv.visualizations.vis_image
"""
C, H, W = img.shape
if C == 1:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
        # remove channel dimension
ax.imshow(img.squeeze())
else:
ax = chainercv.visualizations.vis_image(img, ax)
return ax
def preprocess(point, ax, img):
input_point = np.asarray(point)
if input_point.ndim == 2:
input_point = np.expand_dims(point, axis=0)
H, W = None, None
if ax is None:
fig = plt.figure()
if input_point.shape[-1] == 3:
ax = fig.add_subplot(1, 1, 1, projection="3d")
else:
ax = fig.add_subplot(1, 1, 1)
if img is not None:
ax = vis_image(img, ax=ax)
_, H, W = img.shape
return input_point, ax, H, W
def vis_point(point, img=None, color=None, ax=None):
"""
Visualize points in an image, customized to our purpose.
Base implementation is taken from chainercv.visualizations.vis_image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
c = np.asarray(color) / 255. if color is not None else None
for i in range(n_inst):
        # note that the shape of `point[i]` is (K, N) and each point is in (y, x) or (z, y, x) order.
        pts = point[i].transpose()  # (K, N) -> (N, K)
        # reorder coordinates: yx -> xy or zyx -> xyz
        pts = pts[::-1]
ax.scatter(*pts, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
def vis_edge(point, indices, img=None, color=None, ax=None):
"""
Visualize edges in an image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
if color is not None:
color = np.asarray(color) / 255.
else:
color = [None] * len(indices)
for i in range(n_inst):
        # note that the shape of `point[i]` is (K, N) and each point is in (y, x) or (z, y, x) order.
pts = point[i]
for ((s, t), c) in zip(indices, color):
            # Select the pair of points (start, target) that forms the edge.
            # Note that [::-1] reorders the coordinates: yx -> xy or zyx -> xyz
edge = pts[[s, t]].transpose()
edge = edge[::-1]
ax.plot(*edge, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
def vis_pose(point, indices, img=None, point_color=None, edge_color=None, ax=None):
ax = vis_point(point, img=img, color=point_color, ax=ax)
vis_edge(point, indices, img=img, color=edge_color, ax=ax)
def visualize_both(dataset, keypoint_names, edges, color_map, normalize=False):
import random
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223, projection="3d")
ax4 = fig.add_subplot(224, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
depth = example["depth"].astype(np.float32)
depth_joint = example["depth_joint"]
depth_camera = example["depth_camera"]
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True)
z_size = example["param"]["z_size"]
if normalize:
depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size)
depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size)
rgb = example["rgb"]
rgb_joint = example["rgb_joint"]
rgb_camera = example["rgb_camera"]
rgb_vu = rgb_camera.zyx2vu(rgb_joint)
rgb_joint = normalize_joint_zyx(rgb_joint, rgb_camera, z_size)
print(example["param"])
vis_point(rgb_vu, img=rgb, color=color, ax=ax1)
vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1)
vis_point(rgb_joint, color=color, ax=ax3)
vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3)
vis_point(depth_vu, img=depth, color=color, ax=ax2)
vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2)
vis_point(depth_joint, color=color, ax=ax4)
vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4)
for ax in [ax3, ax4]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
def visualize_rgb(dataset, keypoint_names, edges, color_map, idx=None):
import random
if idx is None:
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(5, 10))
ax1 = fig.add_subplot(211)
ax3 = fig.add_subplot(212, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
rgb = example["rgb"]
rgb_joint = example["rgb_joint"]
rgb_camera = example["rgb_camera"]
rgb_vu = rgb_camera.zyx2vu(rgb_joint)
vis_point(rgb_vu, img=rgb, color=color, ax=ax1)
vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1)
vis_point(rgb_joint, color=color, ax=ax3)
vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3)
for ax in [ax3]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
def visualize_depth(dataset, keypoint_names, edges, color_map, normalize=False):
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(5, 10))
ax2 = fig.add_subplot(211)
ax4 = fig.add_subplot(212, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
depth = example["depth"].astype(np.float32)
depth_joint = example["depth_joint"]
depth_camera = example["depth_camera"]
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True)
z_size = example["param"]["z_size"]
if normalize:
depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size)
depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size)
print(example["param"])
vis_point(depth_vu, img=depth, color=color, ax=ax2)
vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2)
vis_point(depth_joint, color=color, ax=ax4)
vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4)
for ax in [ax4]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
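# A minimal usage sketch for vis_point/vis_edge with synthetic data; the keypoint
# layout (5 points chained by 4 edges) is made up for illustration and is not a
# real hand skeleton.
if __name__ == "__main__":
    height, width = 64, 64
    demo_img = np.random.uniform(0, 255, size=(3, height, width)).astype(np.float32)
    # One instance of 5 keypoints, shape (K, N) = (5, 2), each point in (y, x) order.
    demo_pts = np.stack(
        [np.random.uniform(0, height, size=5), np.random.uniform(0, width, size=5)],
        axis=1,
    )
    demo_edges = [(i, i + 1) for i in range(4)]
    demo_ax = vis_point(demo_pts, img=demo_img, color=[(255, 0, 0)] * 5)
    vis_edge(demo_pts, demo_edges, color=[(0, 255, 0)] * len(demo_edges), ax=demo_ax)
    plt.show()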
|
6125
|
from typing import List, Tuple, Union
import numpy as np
import scipy.special
from PIL import Image, ImageFilter
class RandomBetaMorphology:
def __init__(
self, filter_size_min: int, filter_size_max: int, alpha: float, beta: float
) -> None:
assert filter_size_min % 2 != 0, "Filter size must be odd"
assert filter_size_max % 2 != 0, "Filter size must be odd"
self.filter_size_min = filter_size_min
self.filter_size_max = filter_size_max
self.alpha = alpha
self.beta = beta
self.filter_sizes, self.filter_probs = self._create_filter_distribution(
filter_size_min, filter_size_max, alpha, beta
)
@staticmethod
def _create_filter_distribution(
filter_size_min: int, filter_size_max: int, alpha: float, beta: float
) -> Tuple[List[int], Union[List[float], np.ndarray]]:
n = (filter_size_max - filter_size_min) // 2 + 1
if n < 2:
return [filter_size_min], np.asarray([1.0], dtype=np.float32)
filter_sizes = []
filter_probs = []
for k in range(n):
filter_sizes.append(filter_size_min + 2 * k)
filter_probs.append(
scipy.special.comb(n, k) * scipy.special.beta(alpha + k, n - k + beta)
)
np_filter_probs = np.asarray(filter_probs, dtype=np.float32)
        np_filter_probs = np_filter_probs / np_filter_probs.sum()
return filter_sizes, np_filter_probs
def sample_filter_size(self):
filter_size = np.random.choice(self.filter_sizes, p=self.filter_probs)
return filter_size
def __call__(self, *args, **kwargs):
        raise NotImplementedError
def __repr__(self) -> str:
return (
f"vision.{self.__class__.__name__}("
f"filter_size_min={self.filter_size_min}, "
f"filter_size_max={self.filter_size_max}, "
f"alpha={self.alpha}, beta={self.beta})"
)
class Dilate(RandomBetaMorphology):
def __init__(
self,
filter_size_min: int = 3,
filter_size_max: int = 7,
alpha: float = 1,
beta: float = 3,
) -> None:
super().__init__(filter_size_min, filter_size_max, alpha, beta)
def __call__(self, img: Image) -> Image:
filter_size = self.sample_filter_size()
return img.filter(ImageFilter.MaxFilter(filter_size))
class Erode(RandomBetaMorphology):
def __init__(
self,
filter_size_min: int = 3,
filter_size_max: int = 5,
alpha: float = 1,
beta: float = 3,
) -> None:
super().__init__(filter_size_min, filter_size_max, alpha, beta)
def __call__(self, img: Image) -> Image:
filter_size = self.sample_filter_size()
return img.filter(ImageFilter.MinFilter(filter_size))
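# A minimal sketch that makes the discretised Beta-Binomial-style distribution over
# odd filter sizes easier to inspect; the helper name is hypothetical and simply
# wraps `_create_filter_distribution` with the default Dilate parameters.
def _example_filter_size_distribution(
    filter_size_min: int = 3, filter_size_max: int = 7, alpha: float = 1, beta: float = 3
):
    sizes, probs = RandomBetaMorphology._create_filter_distribution(
        filter_size_min, filter_size_max, alpha, beta
    )
    return dict(zip(sizes, np.round(probs, 3).tolist()))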
if __name__ == "__main__":
import argparse
from PIL import ImageOps
parser = argparse.ArgumentParser()
parser.add_argument("--operation", choices=("dilate", "erode"), default="dilate")
parser.add_argument("images", type=argparse.FileType("rb"), nargs="+")
args = parser.parse_args()
transformer = Dilate() if args.operation == "dilate" else Erode()
for f in args.images:
x = Image.open(f, "r").convert("L")
x = ImageOps.invert(x)
y = transformer(x)
w, h = x.size
z = Image.new("L", (w, 2 * h))
z.paste(x, (0, 0))
z.paste(y, (0, h))
z = z.resize(size=(w // 2, h), resample=Image.BICUBIC)
z.show()
input()
|
6157
|
import json
from typing import Dict, Optional
import requests
from federation.hostmeta.parsers import (
parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document,
parse_matrix_document, parse_misskey_document)
from federation.utils.network import fetch_document
HIGHEST_SUPPORTED_NODEINFO_VERSION = 2.1
def fetch_mastodon_document(host):
doc, status_code, error = fetch_document(host=host, path='/api/v1/instance')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_mastodon_document(doc, host)
def fetch_matrix_document(host: str) -> Optional[Dict]:
doc, status_code, error = fetch_document(host=host, path='/_matrix/federation/v1/version')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_matrix_document(doc, host)
def fetch_misskey_document(host: str, mastodon_document: Dict=None) -> Optional[Dict]:
try:
response = requests.post(f'https://{host}/api/meta') # ¯\_(ツ)_/¯
except Exception:
return
try:
doc = response.json()
except json.JSONDecodeError:
return
if response.status_code == 200:
return parse_misskey_document(doc, host, mastodon_document=mastodon_document)
def fetch_nodeinfo_document(host):
doc, status_code, error = fetch_document(host=host, path='/.well-known/nodeinfo')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
url, highest_version = '', 0.0
if doc.get('0'):
# Buggy NodeInfo from certain old Hubzilla versions
url = doc.get('0', {}).get('href')
elif isinstance(doc.get('links'), dict):
# Another buggy NodeInfo from certain old Hubzilla versions
url = doc.get('links').get('href')
else:
for link in doc.get('links'):
version = float(link.get('rel').split('/')[-1])
if highest_version < version <= HIGHEST_SUPPORTED_NODEINFO_VERSION:
url, highest_version = link.get('href'), version
if not url:
return
doc, status_code, error = fetch_document(url=url)
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_nodeinfo_document(doc, host)
def fetch_nodeinfo2_document(host):
doc, status_code, error = fetch_document(host=host, path='/.well-known/x-nodeinfo2')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_nodeinfo2_document(doc, host)
def fetch_statisticsjson_document(host):
doc, status_code, error = fetch_document(host=host, path='/statistics.json')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_statisticsjson_document(doc, host)
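# Illustrative usage ("example.org" is a placeholder host):
#
#   result = fetch_nodeinfo_document("example.org")
#   # -> dict produced by parse_nodeinfo_document, or None if fetching/parsing failed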
|
6171
|
for ch in "Hello world!":
d = ord(ch)
h = hex(d)
o = oct(d)
b = bin(d)
    print(ch, d, h, o, b)
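# First line of output: H 72 0x48 0o110 0b1001000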
|
6178
|
from KeyValueTree import KeyValueTree
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import KeyValue as KeyValue
from django.test.client import RequestFactory
from api_v2.keyvalue_handler import KeyValueHandler
import json
factory = RequestFactory()
class Rack:
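    """Aggregate view of one rack: the rack's own key/value metadata from Truth plus
    the systems whose key/value store references this rack, sorted by slot."""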
rack_name = None
tree = None
kv = None
ru = None
width = None
systems = []
ethernet_patch_panel_24 = []
ethernet_patch_panel_48 = []
def __init__(self, rack_name):
self.systems = []
self.rack_name = rack_name
self.kv = Truth.objects.select_related('truth_key_value').get(name=self.rack_name)
self.system_list = KeyValue.objects.select_related('system').filter(value__contains="truth:%s" % (self.rack_name))
self.ethernet_patch_panel_24 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 24)
self.ethernet_patch_panel_48 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 48)
        h = KeyValueHandler()
for s in self.system_list:
request = factory.get('/api/v2/keyvalue/?keystore=%s' % (s.system.hostname), follow=True)
tree = h.read(request)
system_ru = self._get_system_ru(tree)
system_image = self._get_system_image(tree)
system_slot = self._get_system_slot(tree)
self.systems.append({
"system_name":s.system.hostname,
"system_id":s.system.id,
"system_ru":system_ru,
"system_image":system_image,
'system_slot':system_slot,
'operating_system':str(s.system.operating_system),
'server_model': str(s.system.server_model),
'oob_ip': str(s.system.oob_ip),
})
self.systems = sorted(self.systems, key=lambda k: k['system_slot'])
try:
self.ru = self.kv.keyvalue_set.get(key='rack_ru').value
        except Exception:
self.ru = 42
try:
self.width = self.kv.keyvalue_set.get(key='rack_width').value
        except Exception:
self.width = 30
def _get_ethernet_patch_panels(self, tree, type, port_count):
ret = []
for i in tree.keyvalue_set.all():
match_string = "%i_port_%s_patch_panel" % (port_count, type)
if str(i.key) == match_string:
ret.append(i.value)
return ret
def _get_system_ru(self, tree):
for i in tree.iterkeys():
try:
if 'system_ru' in i.split(':'):
return tree[i]
            except Exception:
pass
return 4
def _get_system_image(self, tree):
for i in tree.iterkeys():
try:
if 'system_image' in i.split(':'):
return tree[i]
            except Exception:
pass
return None
def _get_system_slot(self, tree):
for i in tree.iterkeys():
try:
if 'system_slot' in i.split(':'):
return tree[i]
            except Exception:
pass
return 1
|
6226
|
from django.core.exceptions import NON_FIELD_ERRORS
from rest_framework import status, viewsets, serializers
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from jet_django.filters.model_aggregate import AggregateFilter
from jet_django.filters.model_group import GroupFilter
from jet_django.pagination import CustomPageNumberPagination
from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo
from jet_django.serializers.reorder import reorder_serializer_factory
class AggregateSerializer(serializers.Serializer):
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
class GroupSerializer(serializers.Serializer):
group = serializers.CharField()
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'group_serializer' in kwargs:
self.fields['group'] = kwargs.pop('group_serializer')
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
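# Builds a ModelViewSet for one admin-exposed model: standard CRUD plus /aggregate,
# /group, /reorder and /reset_order list routes, and one extra POST route per entry
# in build_actions (named after each action's _meta.name).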
def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field):
ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)
class Viewset(viewsets.ModelViewSet):
model = build_model
queryset = build_queryset
pagination_class = CustomPageNumberPagination
filter_class = build_filter_class
authentication_classes = ()
permission_classes = (HasProjectPermissions, ModifyNotInDemo)
def get_serializer_class(self):
if self.action == 'aggregate':
return AggregateSerializer
elif self.action == 'group':
return GroupSerializer
elif self.action == 'retrieve':
return build_detail_serializer_class
else:
return build_serializer_class
@list_route(methods=['get'])
def aggregate(self, request):
queryset = self.filter_queryset(self.get_queryset())
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
y_field = self.model._meta.get_field(y_column)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = AggregateFilter().filter(queryset, {
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
y_func_serializer=y_serializer
)
return Response(serializer.data)
@list_route(methods=['get'])
def group(self, request):
queryset = self.filter_queryset(self.get_queryset())
x_column = request.GET['_x_column']
x_lookup_name = request.GET.get('_x_lookup')
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
x_field = self.model._meta.get_field(x_column)
x_lookup = x_field.class_lookups.get(x_lookup_name)
y_field = self.model._meta.get_field(y_column)
if x_lookup:
x_field = x_lookup('none').output_field
x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field)
x_serializer = x_serializer_class(**x_serializer_kwargs)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = GroupFilter().filter(queryset, {
'x_column': x_column,
'x_lookup': x_lookup,
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
many=True,
group_serializer=x_serializer,
y_func_serializer=y_serializer
)
return Response(serializer.data)
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
@list_route(methods=['post'])
def reorder(self, request):
serializer = ReorderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
@list_route(methods=['post'])
def reset_order(self, request):
i = 1
for instance in build_queryset:
setattr(instance, ordering_field, i)
instance.save()
i += 1
return Response({})
    for action in build_actions:
        # Bind the current action as a default argument so each generated route keeps
        # its own action class (otherwise every route would capture the last one).
        def route(self, request, action=action):
form = action(data=request.data)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
queryset = form.filer_queryset(self.get_queryset())
try:
result = form.save(queryset)
except Exception as e:
return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST)
return Response({'action': form._meta.name, 'result': result})
decorator = list_route(methods=['post'])
route = decorator(route)
setattr(Viewset, action._meta.name, route)
return Viewset
|
6248
|
% Load the raw position samples: the file stores (x, y, t) triplets.
% (Read here as whitespace-separated floats; adjust the fscanf format if the file is binary.)
fid = fopen('data/hipp_2dtrack_a/smJun03p2.dat', 'r');
data = fscanf(fid, '%f');
C = 49;  % number of recorded cells
data = reshape(data, 3, length(data)/3);
data = data';
size(data) % 43799-by-3
fclose(fid);
% sampling time
Ts = 0.0333;
duration = size(data,1) * Ts; % in second
Tmax = data(end, 3);
Tmin = data(1,3);
time_edges = [Tmin: 0.25: Tmax]; % 250 ms per bin
% interpolated rat's position in time bins
Rat_pos = interp1(data(:, 3), [data(:, 1), data(:, 2)], time_edges');
vel = abs(diff(Rat_pos, 1, 1 )); % row difference
vel = [vel(1, :); vel];
% displacement per 250 ms bin, scaled by 4 to give cm/s (positions assumed to be in cm)
rat_vel = 4 * sqrt(vel(:, 1).^2 + vel(:, 2).^2); % unit: cm/s
vel_ind = find(rat_vel >= 10); % RUN velocity threshold
% using RUN only
T = length(vel_ind);
% using Run + pause periods
T = length(time_edges);
AllSpikeData = zeros(C,T);
for i=1:C
str = ['Cell_num' num2str(i)];
fid = fopen(str, 'r');
cell_data = fscanf(fid, '%f');
cell_data = reshape(cell_data, 3, length(cell_data)/3)';
spike_time = cell_data(:, 3);
spike_pos = cell_data(:, 1:2);
[spike_time_count, bin] = histc(spike_time, time_edges); % column vector
% if analyzing the RUN period only uncomment this
% spike_time_count = spike_time_count(vel_ind);
AllSpikeData(i, :) = spike_time_count';
fclose(fid);
end
|
6264
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def GetViewTemplate(view):
if not view: return None
elif hasattr(view, "ViewTemplateId"):
if view.ViewTemplateId.IntegerValue == -1: return None
else: return view.Document.GetElement(view.ViewTemplateId)
else: return None
views = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in views]
else: OUT = GetViewTemplate(views)
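# Note: in Dynamo, IN[0] may be a single view or a list of views; OUT mirrors that
# shape, holding the applied view template element(s) or None where a view has no
# template assigned.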
|