74352
|
import pybullet as p
from gym import spaces
from diy_gym.addons.addon import Addon
class ExternalForce(Addon):
"""ExternalForce defines a addon that can be used to apply an external force to the base a model.
"""
def __init__(self, parent, config):
super(ExternalForce, self).__init__(parent, config)
# store the uid of the parent model
self.uid = parent.uid
# get the config for the position of the force or default to [0 0 0] if it's not specified
self.xyz = config.get('xyz', [0.0, 0.0, 0.0])
# define the space for the actions this addon expects to receive
self.action_space = spaces.Box(-10.0, 10.0, shape=(3, ), dtype='float32')
def update(self, action):
"""Call the pybullet function to apply the desired for to this model.
"""
p.applyExternalForce(self.uid, -1, action, self.xyz, p.WORLD_FRAME)
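# A minimal standalone sketch of the pybullet call this addon wraps. It assumes pybullet_data's
# "r2d2.urdf" is available and does not go through the diy_gym Addon machinery; the sampled
# action simply plays the role of the force vector update() would receive.
if __name__ == '__main__':
    import pybullet_data

    client = p.connect(p.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    uid = p.loadURDF('r2d2.urdf')

    action_space = spaces.Box(-10.0, 10.0, shape=(3, ), dtype='float32')
    force = action_space.sample()

    # Apply the sampled force to the base link (-1) at the world origin, then step once.
    p.applyExternalForce(uid, -1, force.tolist(), [0.0, 0.0, 0.0], p.WORLD_FRAME)
    p.stepSimulation()
    p.disconnect(client)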
|
74361
|
import pytest
from yahooquery import Screener
def test_screener():
s = Screener()
assert s.get_screeners("most_actives") is not None
def test_available_screeners():
s = Screener()
assert s.available_screeners is not None
def test_bad_screener():
with pytest.raises(ValueError):
s = Screener()
assert s.get_screeners("most_active")
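# A small usage sketch (not a test): fetching screener data requires live network access to
# Yahoo, so it only runs when this file is executed directly. The exact shape of the returned
# dictionary may vary between yahooquery versions.
if __name__ == '__main__':
    s = Screener()
    print(s.available_screeners[:5])
    results = s.get_screeners("most_actives")
    print(list(results.keys()))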
|
74381
|
from collections import OrderedDict
from fcgiproto.constants import FCGI_KEEP_CONN
class RequestEvent(object):
"""
Base class for events that target a specific request.
:ivar int request_id: identifier of the associated request
"""
__slots__ = ('request_id',)
def __init__(self, request_id):
self.request_id = request_id
class RequestBeginEvent(RequestEvent):
"""
Signals the application about a new incoming request.
:ivar int request_id: identifier of the request
:ivar int role: expected role of the application for the request
one of (``FCGI_RESPONDER``, ``FCGI_AUTHORIZER``, ``FCGI_FILTER``)
:ivar dict params: FCGI parameters for the request
"""
__slots__ = ('role', 'keep_connection', 'params')
def __init__(self, request_id, role, flags, params):
super(RequestBeginEvent, self).__init__(request_id)
self.role = role
self.keep_connection = flags & FCGI_KEEP_CONN
self.params = OrderedDict(params)
class RequestDataEvent(RequestEvent):
"""
Contains body data for the specified request.
An empty ``data`` argument signifies the end of the data stream.
:ivar int request_id: identifier of the request
:ivar bytes data: bytestring containing raw request data
"""
__slots__ = ('data',)
def __init__(self, request_id, data):
super(RequestDataEvent, self).__init__(request_id)
self.data = data
class RequestSecondaryDataEvent(RequestEvent):
"""
Contains secondary data for the specified request.
An empty ``data`` argument signifies the end of the data stream.
These events are only received for the ``FCGI_FILTER`` role.
:ivar int request_id: identifier of the request
:ivar bytes data: bytestring containing raw secondary data
"""
__slots__ = ('data',)
def __init__(self, request_id, data):
super(RequestSecondaryDataEvent, self).__init__(request_id)
self.data = data
class RequestAbortEvent(RequestEvent):
"""Signals the application that the server wants the specified request aborted."""
__slots__ = ()
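# A sketch of how an application loop might dispatch on the event classes above. The handler
# callables passed in are hypothetical and not part of fcgiproto.
def _dispatch_event(event, on_begin, on_data, on_abort):
    """Route a request event to the matching (hypothetical) handler callback."""
    if isinstance(event, RequestBeginEvent):
        on_begin(event.request_id, event.role, event.params)
    elif isinstance(event, RequestDataEvent):
        # An empty event.data signals the end of the request body stream.
        on_data(event.request_id, event.data)
    elif isinstance(event, RequestAbortEvent):
        on_abort(event.request_id)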
|
74390
|
import spacy
nlp = spacy.load('en')
doc = nlp(u'President Trump has a dispute with Mexico over immigration. IBM and Apple are cooperating on marketing in 2014. Pepsi and Coke sell well to Asian and South American customers. He bought a Ford Escort for $20,000 and drove to the lake for the weekend. The parade was last Saturday.')
for entity in doc.ents:
print("label: {}\tlabel_: {}\ttext: {}".format(
entity.label,entity.label_,entity.text))
|
74399
|
import numpy as np
import unittest
from chainer import testing
from chainercv.experimental.links.model.pspnet import convolution_crop
class TestConvolutionCrop(unittest.TestCase):
def test_convolution_crop(self):
size = (8, 6)
stride = (8, 6)
n_channel = 3
img = np.random.uniform(size=(n_channel, 16, 12)).astype(np.float32)
crop_imgs, param = convolution_crop(
img, size, stride, return_param=True)
self.assertEqual(crop_imgs.shape, (4, n_channel) + size)
self.assertEqual(crop_imgs.dtype, np.float32)
for y in range(2):
for x in range(2):
self.assertEqual(param['y_slices'][2 * y + x].start, 8 * y)
self.assertEqual(
param['y_slices'][2 * y + x].stop, 8 * (y + 1))
self.assertEqual(param['x_slices'][2 * y + x].start, 6 * x)
self.assertEqual(
param['x_slices'][2 * y + x].stop, 6 * (x + 1))
for i in range(4):
self.assertEqual(param['crop_y_slices'][i].start, 0)
self.assertEqual(param['crop_y_slices'][i].stop, 8)
self.assertEqual(param['crop_x_slices'][i].start, 0)
self.assertEqual(param['crop_x_slices'][i].stop, 6)
testing.run_module(__name__, __file__)
|
74416
|
from ._version import __version__, __js__
def _jupyter_labextension_paths():
return [{"src": "labextension", "dest": __js__["name"]}]
def _jupyter_server_extension_points():
return [{"module": "jupyterlab_pullrequests"}]
def _load_jupyter_server_extension(server_app):
"""Registers the API handler to receive HTTP requests from the frontend extension.
Args:
server_app (jupyterlab.labapp.LabApp): JupyterLab application instance
"""
from .base import PRConfig
from .handlers import setup_handlers
setup_handlers(server_app.web_app, server_app.config)
server_app.log.info("Registered jupyterlab_pullrequests extension")
# for legacy launching with notebook (e.g. Binder)
_jupyter_server_extension_paths = _jupyter_server_extension_points
load_jupyter_server_extension = _load_jupyter_server_extension
# Entry points
def get_github_manager(config: "traitlets.config.Config") -> "jupyterlab_pullrequests.managers.PullRequestsManager":
"""GitHub Manager factory"""
from .managers.github import GitHubManager
return GitHubManager(config)
def get_gitlab_manager(config: "traitlets.config.Config") -> "jupyterlab_pullrequests.managers.PullRequestsManager":
"""GitLab Manager factory"""
from .managers.gitlab import GitLabManager
return GitLabManager(config)
|
74422
|
import itertools
import numpy as np
import networkx as nx
import vocab
def coref_score(instance, property_id):
return [ instance.subject_entity["coref_score"], instance.object_entity["coref_score"] ]
def el_score(instance, property_id):
return [ instance.subject_entity["el_score"], instance.object_entity["el_score"] ]
def _entity_linker_types_from_mention(entity):
arr = np.zeros(len(vocab.types), np.float32)
for i, t in enumerate(vocab.types):
if t in entity["types"]:
arr[i] = 1.0
return arr
def entity_linker_types(instance, property_id):
return np.concatenate([
_entity_linker_types_from_mention(instance.subject_entity),
_entity_linker_types_from_mention(instance.object_entity)
])
def wikidata_predicates(instance, property_id):
return None
def text_score(instance, property_id):
return [ instance.text_instance.scores[property_id] ]
|
74426
|
import numpy as np
from ..Delboeuf.delboeuf_parameters import _delboeuf_parameters_sizeinner, _delboeuf_parameters_sizeouter
def _ebbinghaus_parameters(illusion_strength=0, difference=0, size_min=0.25, distance=1, distance_auto=False):
# Size inner circles
parameters = _delboeuf_parameters_sizeinner(difference=difference, size_min=size_min)
inner_size_left = parameters["Size_Inner_Left"]
inner_size_right = parameters["Size_Inner_Right"]
# Position
position_left = -0.5
position_right = 0.5
# Base size outer circles
outer_size_left = size_min
outer_size_right = size_min
# Actual outer size based on illusion
outer_size_left, outer_size_right = _delboeuf_parameters_sizeouter(outer_size_left,
outer_size_right,
difference=difference,
illusion_strength=illusion_strength,
both_sizes=True)
# Location outer circles
l_outer_x, l_outer_y, l_distance_edges = _ebbinghaus_parameters_outercircles(x=position_left,
y=0,
size_inner=inner_size_left,
size_outer=outer_size_left,
n="auto")
r_outer_x, r_outer_y, r_distance_edges = _ebbinghaus_parameters_outercircles(x=position_right,
y=0,
size_inner=inner_size_right,
size_outer=outer_size_right,
n="auto")
# Get location and distances
if distance_auto is False:
distance_reference = 'Between Centers'
distance_centers = distance
position_left, position_right = -(distance_centers / 2), (distance_centers / 2)
distance_edges_inner = distance_centers - (inner_size_left/2 + inner_size_right/2)
distance_edges_outer = distance_centers - l_distance_edges - (outer_size_left/2) - r_distance_edges - (outer_size_right/2)
else:
distance_reference = 'Between Edges'
distance_edges_outer = distance
distance_centers = distance_edges_outer + l_distance_edges + (outer_size_left/2) + r_distance_edges + (outer_size_right/2)
distance_edges_inner = distance_centers - (outer_size_left/2 + outer_size_right/2)
position_left, position_right = -(distance_centers / 2), (distance_centers / 2)
parameters.update({
"Illusion": "Ebbinghaus",
"Illusion_Strength": illusion_strength,
"Illusion_Type": "Incongruent" if illusion_strength > 0 else "Congruent",
"Size_Outer_Left": outer_size_left,
"Size_Outer_Right": outer_size_right,
"Distance": distance_centers,
"Distance_Reference": distance_reference,
"Distance_Edges_Inner": distance_edges_inner,
"Distance_Edges_Outer": distance_edges_outer,
"Size_Inner_Smaller": np.min([inner_size_left, inner_size_right]),
"Size_Inner_Larger": np.max([inner_size_left, inner_size_right]),
"Size_Outer_Smaller": np.min([outer_size_left, outer_size_right]),
"Size_Outer_Larger": np.max([outer_size_left, outer_size_right]),
"Position_Outer_x_Left": l_outer_x,
"Position_Outer_y_Left": l_outer_y,
"Position_Outer_x_Right": r_outer_x,
"Position_Outer_y_Right": r_outer_y,
"Position_Left": position_left,
"Position_Right": position_right
})
return parameters
def _ebbinghaus_parameters_outercircles(x=0, y=0, size_inner=0.25, size_outer=0.3, n="auto"):
# Find distance between center of inner circle and centers of outer circles
distance = (size_inner / 2) + (size_outer / 2) + 0.01
# Find n
if n == "auto":
perimeter = 2 * np.pi * distance
n = int(perimeter / size_outer)
# Get position of outer circles
angle = np.deg2rad(np.linspace(0, 360, num=n, endpoint=False))
circle_x = x + (np.cos(angle) * distance)
circle_y = y + (np.sin(angle) * distance)
return circle_x, circle_y, distance
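# A small self-check sketch: with the default sizes, the outer circles sit on a ring whose
# radius is half the inner size plus half the outer size plus a 0.01 margin.
if __name__ == "__main__":
    xs, ys, ring_radius = _ebbinghaus_parameters_outercircles(x=0, y=0, size_inner=0.25,
                                                              size_outer=0.3, n="auto")
    print("number of outer circles:", len(xs))   # int(2 * pi * 0.285 / 0.3) = 5
    print("ring radius:", ring_radius)           # 0.125 + 0.15 + 0.01 = 0.285
    print(np.allclose(np.sqrt(xs ** 2 + ys ** 2), ring_radius))  # all centres lie on the ring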
|
74428
|
import json
import os
from typing import List, Optional, Tuple
import pytest
@pytest.fixture(scope='module')
def here():
return os.path.abspath(os.path.dirname(__file__))
@pytest.fixture(scope='module')
def accounts(here) -> Optional[List[Tuple]]:
"""Return account list"""
accounts_path = os.path.join(here, 'config')
if not os.path.exists(accounts_path):
return None
with open(accounts_path, 'r') as f:
raw = f.read()
return json.loads(raw)
|
74429
|
import datetime
from typing import TYPE_CHECKING, Generator
import Evtx.Evtx as evtx
from lxml import etree
from beagle.common.logging import logger
from beagle.datasources.base_datasource import DataSource
from beagle.transformers.evtx_transformer import WinEVTXTransformer
if TYPE_CHECKING:
from beagle.transformer.base_transformer import Transformer
from typing import List
class WinEVTX(DataSource):
"""Parses Windows .evtx files. Yields events one by one using the `python-evtx` library.
Parameters
----------
evtx_log_file : str
The path to the windows evtx file to parse.
"""
name = "Windows EVTX File"
transformers = [WinEVTXTransformer] # type: List[Transformer]
category = "Windows Event Logs"
def __init__(self, evtx_log_file: str) -> None:
self.file_path = evtx_log_file
logger.info(f"Setting up WinEVTX for {self.file_path}")
def events(self) -> Generator[dict, None, None]:
with evtx.Evtx(self.file_path) as log:
for record in log.records():
# Get the lxml object
yield self.parse_record(record.lxml())
def metadata(self) -> dict:
"""Get the hostname by inspecting the first record.
Returns
-------
dict
>>> {"hostname": str}
"""
with evtx.Evtx(self.file_path) as log:
for record in log.records():
# Get the lxml object
event = self.parse_record(record.lxml())
break
return {"hostname": event["computer"]}
def parse_record(self, record: etree.ElementTree, name="") -> dict:
"""Recursivly converts a etree.ElementTree record to a JSON dictionary
with one level.
Parameters
----------
record : etree.ElementTree
Current record to parse
name : str, optional
Name of the current key we are at.
Returns
-------
dict
JSON representation of the event
"""
data = {}
for node in record:
next_name = node.tag.split("}")[-1]
# Recurse
data.update(self.parse_record(node, next_name))
if record.attrib and record.text:
key = f"{name}_{record.keys()[0]}".lower()
# Use attributes if we're in EventData
if "EventData" in record.getparent().tag:
key += f"_{record.values()[0]}".lower()
data[key] = record.text
elif record.attrib:
for k, val in record.attrib.items():
key = f"{name}_{k}".lower()
data[key] = val
else:
curr_name = record.tag.split("}")[-1]
key = f"{curr_name}".lower()
data[key] = record.text
if key == "timecreated_systemtime":
time = datetime.datetime.strptime(
data["timecreated_systemtime"], "%Y-%m-%d %H:%M:%S.%f"
)
epoch = int(time.strftime("%s"))
data["timecreated_systemtime"] = epoch
return data
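# A usage sketch: iterate parsed events from an .evtx export. The file name below is a
# hypothetical placeholder; point it at a real Windows event log to try it.
if __name__ == "__main__":
    source = WinEVTX("security.evtx")  # hypothetical path
    print(source.metadata())
    for i, event in enumerate(source.events()):
        print(event)
        if i >= 4:  # only show the first few records
            break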
|
74447
|
import tempfile
import strax
import straxen
from straxen.test_utils import nt_test_run_id, DummyRawRecords, testing_config_1T, test_run_id_1T
def _run_plugins(st,
make_all=False,
run_id=nt_test_run_id,
from_scratch=False,
**process_kwargs):
"""
Try all plugins (except the DAQReader) for a given context (st) to check that
we can actually push some (empty) data through them and that we don't run into
nasty problems, such as referring to a non-existent dali folder.
"""
with tempfile.TemporaryDirectory() as temp_dir:
if from_scratch:
st.storage = [strax.DataDirectory(temp_dir)]
# As we use a temporary directory we should have a clean start
assert not st.is_stored(run_id, 'raw_records'), 'have RR???'
# Don't concern ourselves with rr_aqmon et cetera
_forbidden_plugins = tuple([p for p in
straxen.daqreader.DAQReader.provides
if p not in
st._plugin_class_registry['raw_records'].provides])
st.set_context_config({'forbid_creation_of': _forbidden_plugins})
# Create event info
target = 'event_info'
st.make(run_id=run_id,
targets=target,
**process_kwargs)
# The stuff should be there
assert st.is_stored(run_id, target), f'Could not make {target}'
if not make_all:
return
end_targets = set(st._get_end_targets(st._plugin_class_registry))
for p in end_targets - set(_forbidden_plugins):
if 'raw' in p:
continue
st.make(run_id, p)
# Now make sure we can get some data for all plugins
all_datatypes = set(st._plugin_class_registry.keys())
for p in all_datatypes - set(_forbidden_plugins):
should_be_stored = (st._plugin_class_registry[p].save_when ==
strax.SaveWhen.ALWAYS)
if should_be_stored:
is_stored = st.is_stored(run_id, p)
assert is_stored, f"{p} did not save correctly!"
print("Wonderful all plugins work (= at least they don't fail), bye bye")
def _update_context(st, max_workers, nt=True):
# Ignore strax-internal warnings
st.set_context_config({'free_options': tuple(st.config.keys())})
if not nt:
st.register(DummyRawRecords)
if straxen.utilix_is_configured(warning_message=False):
# Set some placeholder gain as this takes too long for 1T to load from CMT
st.set_config({k: v for k, v in testing_config_1T.items() if
k in ('hev_gain_model', 'gain_model')})
else:
st.set_config(testing_config_1T)
if max_workers - 1:
st.set_context_config({
'allow_multiprocess': True,
'allow_lazy': False,
'timeout': 60,  # we don't want the Travis build to run forever
})
print('--- Plugins ---')
for k, v in st._plugin_class_registry.items():
print(k, v)
def _test_child_options(st, run_id):
"""
Test which checks if child options are handled correctly.
"""
# Register all used plugins
plugins = []
already_seen = []
for data_type in st._plugin_class_registry.keys():
if data_type in already_seen or data_type in straxen.DAQReader.provides:
continue
p = st.get_single_plugin(run_id, data_type)
plugins.append(p)
already_seen += p.provides
# Loop over all plugins and check if child options were propagated to the parent:
for p in plugins:
for option_name, option in p.takes_config.items():
# Check if option is a child option:
if option.child_option:
# Get the corresponding parent option. We do not have to test whether the
# parent option name is defined, since that is already done in strax.
parent_name = option.parent_option_name
# Now check if parent config was replaced with child:
t = p.config[parent_name] == p.config[option_name]
assert t, (f'This is strange the child option "{option_name}" was set to '
f'{p.config[option_name]}, but the corresponding parent config'
f' "{parent_name}" has the value {p.config[parent_name]}. '
f'Please check the options of {p.__class__.__name__} and if '
'it is a child plugin (child_plugin=True)!')
# Test if parent names were removed from the lineage:
t = parent_name in p.lineage[p.provides[-1]][2]
assert not t, (f'Found "{parent_name}" in the lineage of {p.__class__.__name__}. '
f'This should not have happened since "{parent_name}" is a child of '
f'"{option_name}"!')
def test_1T(ncores=1):
if ncores == 1:
print('-- 1T lazy mode --')
st = straxen.contexts.xenon1t_dali()
_update_context(st, ncores, nt=False)
# Register the 1T plugins for this test as well
st.register_all(straxen.plugins.x1t_cuts)
for _plugin, _plugin_class in st._plugin_class_registry.items():
if 'cut' in str(_plugin).lower():
_plugin_class.save_when = strax.SaveWhen.ALWAYS
# Run the test
_run_plugins(st, make_all=True, max_workers=ncores, run_id=test_run_id_1T, from_scratch=True)
# Test issue #233
st.search_field('cs1')
# set all the configs to be non-CMT
st.set_config(testing_config_1T)
_test_child_options(st, test_run_id_1T)
print(st.context_config)
def test_nT(ncores=1):
if ncores == 1:
print('-- nT lazy mode --')
init_database = straxen.utilix_is_configured(warning_message=False)
st = straxen.test_utils.nt_test_context(
_database_init=init_database,
use_rucio=False,
)
_update_context(st, ncores, nt=True)
# Let's take an abandoned run for which we actually have gains in the CMT
_run_plugins(st, make_all=True, max_workers=ncores, run_id=nt_test_run_id)
# Test issue #233
st.search_field('cs1')
# Test of child plugins:
_test_child_options(st, nt_test_run_id)
print(st.context_config)
def test_nT_mutlticore():
print('nT multicore')
test_nT(2)
|
74456
|
import caffe2onnx.src.c2oObject as Node
from typing import List
import copy
def get_concat_attributes(layer):
axis = layer.concat_param.axis
attributes = {"axis": axis}
return attributes
def get_concat_outshape(layer, input_shape: List) -> List:
bottom = input_shape[0]
axis = layer.concat_param.axis
output_shape = copy.deepcopy(bottom)
assert (axis < len(bottom))
for i in range(1, len(input_shape)):
output_shape[axis] = output_shape[axis] + input_shape[i][axis]
return [output_shape]
#
# if len(bottom) == 2:
# n, c = bottom[0], 0
# for i in range(len(input_shape)):
# c = c + input_shape[i][1]
# output_shape = [[n, c]]
# return output_shape
#
# elif len(bottom) == 3:
# n, c = bottom[0], 0
# for i in range(len(input_shape)):
# c = c + input_shape[i][1]
# output_shape = [[n, c]]
# return output_shape
#
# elif len(bottom) == 4:
# n, c, w, h = input_shape[0][0], 0, input_shape[0][2], input_shape[0][3]
# for i in range(len(input_shape)):
# c = c + input_shape[i][1]
# output_shape = [[n, c, w, h]]
# return output_shape
def createConcat(layer, nodename, inname, outname, input_shape):
attributes = get_concat_attributes(layer)
output_shape = get_concat_outshape(layer, input_shape)
node = Node.c2oNode(layer, nodename, "Concat", inname, outname, input_shape, output_shape, attributes)
return node
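# A quick sketch of the shape rule implemented above, using a stand-in for the caffe layer
# proto: concatenating two NCHW inputs along axis 1 simply sums the channel dimensions.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_layer = SimpleNamespace(concat_param=SimpleNamespace(axis=1))  # hypothetical stub
    shapes = [[1, 32, 28, 28], [1, 64, 28, 28]]
    print(get_concat_outshape(fake_layer, shapes))  # [[1, 96, 28, 28]]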
|
74521
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__license__ = 'MIT'
__maintainer__ = ['<NAME>']
__email__ = ['<EMAIL>']
|
74525
|
from abc import ABC, abstractmethod
import gym
class BaseGymEnvironment(gym.Env):
"""Base class for all Gym environments."""
@property
def parameters(self):
"""Return environment parameters."""
return {
'id': self.spec.id,
}
class EnvBinarySuccessMixin(ABC):
"""Adds binary success metric to environment."""
@abstractmethod
def is_success(self):
"""Returns True is current state indicates success, False otherwise"""
pass
|
74537
|
import numpy as np
def moving_average(a, n=3) :
"""
Perform a moving average and return a vector of the same length as the input.
"""
a=a.ravel()
a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
ret=ret[n - 1:] / n
return ret
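# Worked example with a window of n=3: because the input is padded by repeating its first
# value, the output has the same length as the input and its first entries are biased
# towards a[0].
if __name__ == "__main__":
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    print(moving_average(x, n=3))  # [1.  1.333  2.  3.  4.]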
|
74543
|
import click
from train_anomaly_detection import main_func
import numpy as np
import os
# Define base parameters.
dataset_name = 'selfsupervised'
net_name = 'StackConvNet'
xp_path_base = 'log'
data_path = 'data/full'
train_folder = 'train'
val_pos_folder = 'val/wangen_sun_3_pos'
val_neg_folder = 'val/wangen_sun_3_neg'
load_config = None
load_model = None
nu = 0.1
device = 'cuda'
seed = -1
optimizer_name = 'adam'
lr = 0.0001
n_epochs = 150
lr_milestone = (100,)
batch_size = 200
weight_decay = 0.5e-6
ae_optimizer_name = 'adam'
ae_lr = 0.0001
ae_n_epochs = 350
ae_lr_milestone = (250,)
ae_batch_size = 200
ae_weight_decay = 0.5e-6
n_jobs_dataloader = 0
normal_class = 1
batchnorm = False
dropout = False
augment = False
objectives = [
{'objective': 'real-nvp', 'pretrain': True, 'fix_encoder': True}, # 0
{'objective': 'soft-boundary', 'pretrain': True, 'fix_encoder': False}, # 1
{'objective': 'one-class', 'pretrain': True, 'fix_encoder': False}, # 2
{'objective': 'real-nvp', 'pretrain': False, 'fix_encoder': False}, # 3
{'objective': 'real-nvp', 'pretrain': True, 'fix_encoder': False}, # 4
{'objective': 'one-class', 'pretrain': False, 'fix_encoder': False}, # 5
{'objective': 'soft-boundary', 'pretrain': False, 'fix_encoder': False} # 6
]
modalities = [
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': False, 'ir': True , 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': False, 'ir': True , 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': True , 'ir': True , 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': True , 'ir': True , 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
{'rgb': True , 'ir': True , 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
{'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': True , 'normal_angle': False},
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': False},
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': True },
{'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
{'rgb': False, 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
{'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': True },
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': True , 'normal_angle': False},
{'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': True }
]
N_ITER = 10
auc_mat = np.zeros((N_ITER, len(objectives)+1, len(modalities))) # +1 for Autoencoder
for it in range(N_ITER):
xp_path = os.path.join(xp_path_base, str(it))
for i, obj in enumerate(objectives):
for j, mod in enumerate(modalities):
train_obj = main_func(dataset_name, net_name, xp_path, data_path, train_folder,
val_pos_folder, val_neg_folder, load_config, load_model, obj['objective'], nu,
device, seed, optimizer_name, lr, n_epochs, lr_milestone, batch_size,
weight_decay, obj['pretrain'], ae_optimizer_name, ae_lr, ae_n_epochs,
ae_lr_milestone, ae_batch_size, ae_weight_decay, n_jobs_dataloader, normal_class,
mod['rgb'], mod['ir'], mod['depth'], mod['depth_3d'], mod['normals'],
mod['normal_angle'], batchnorm, dropout, augment, obj['fix_encoder'])
auc = train_obj.results['test_auc']
auc_ae = train_obj.results['test_auc_ae']
auc_mat[it, i,j] = auc
if auc_ae is not None:
auc_mat[it, -1,j] = auc_ae
np.save(os.path.join(xp_path, 'auc.npy'), auc_mat)
np.save(os.path.join(xp_path_base, 'auc.npy'), auc_mat)
print('avg')
print(np.mean(auc_mat, axis=0))
print('std')
print(np.std(auc_mat, axis=0))
|
74548
|
import logging
import oscar
'''
This family is deprecated, so it remains unimplemented.
'''
x0c_name="Translation - deprecated"
log = logging.getLogger('oscar.snac.x0c')
subcodes = {}
def x0c_init(o, sock, cb):
log.info('initializing')
cb()
log.info('finished initializing')
def x0c_x01(o, sock, data):
'''
SNAC (xc, x1): Translation Family Error
reference: U{http://iserverd.khstu.ru/oscar/snac_0c_01.html}
'''
errcode, errmsg, subcode = oscar.snac.error(data)
submsg = subcodes.setdefault(subcode, 'Unknown') if subcode else None
raise oscar.snac.SnacError(0x0c, (errcode, errmsg), (subcode, submsg))
def x0c_x02(o, sock, data):
'''
SNAC (xc, x2): client translate request
reference: U{http://iserverd.khstu.ru/oscar/snac_0c_02.html}
'''
raise NotImplementedError
def x0c_x03(o, sock, data):
'''
SNAC (xc, x3): translate response
reference: U{http://iserverd.khstu.ru/oscar/snac_0c_03.html}
'''
raise NotImplementedError
|
74556
|
from src.environments.slippery_grid import SlipperyGrid
import numpy as np
# A modified version of OpenAI Gym FrozenLake
# only the labelling function needs to be specified
sinks = []
for i in range(12, 16):
for j in range(15, 19):
sinks.append([i, j])
# create a SlipperyGrid object
FrozenLake = SlipperyGrid(shape=[20, 20],
initial_state=[0, 10],
slip_probability=0.1,
sink_states=sinks
)
# define the labellings
labels = np.empty([FrozenLake.shape[0], FrozenLake.shape[1]], dtype=object)
labels[0:20, 0:20] = 'safe'
labels[4:8, 9:13] = 'unsafe'
labels[12:16, 15:19] = 'goal1'
labels[15:19, 15:19] = 'goal2'
labels[9:13, 9:13] = 'goal3'
labels[0:4, 15:19] = 'goal4'
# override the labels
FrozenLake.labels = labels
# FrozenLake doesn't have the action "stay"
FrozenLake.action_space = [
"right",
"up",
"left",
"down",
]
|
74576
|
import argparse
import imagesize
import os
import subprocess
parser = argparse.ArgumentParser(description='MegaDepth Undistortion')
parser.add_argument(
'--colmap_path', type=str, required=True,
help='path to colmap executable'
)
parser.add_argument(
'--base_path', type=str, required=True,
help='path to MegaDepth'
)
args = parser.parse_args()
sfm_path = os.path.join(
args.base_path, 'MegaDepth_v1_SfM'
)
base_depth_path = os.path.join(
args.base_path, 'MegaDepth_v1'
)
output_path = os.path.join(
args.base_path, 'Undistorted_SfM'
)
os.mkdir(output_path)
for scene_name in os.listdir(base_depth_path):
current_output_path = os.path.join(output_path, scene_name)
os.mkdir(current_output_path)
image_path = os.path.join(
base_depth_path, scene_name, 'dense0', 'imgs'
)
if not os.path.exists(image_path):
continue
# Find the maximum image size in scene.
max_image_size = 0
for image_name in os.listdir(image_path):
max_image_size = max(
max_image_size,
max(imagesize.get(os.path.join(image_path, image_name)))
)
# Undistort the images and update the reconstruction.
subprocess.call([
os.path.join(args.colmap_path, 'colmap'), 'image_undistorter',
'--image_path', os.path.join(sfm_path, scene_name, 'images'),
'--input_path', os.path.join(sfm_path, scene_name, 'sparse', 'manhattan', '0'),
'--output_path', current_output_path,
'--max_image_size', str(max_image_size)
])
# Transform the reconstruction to raw text format.
sparse_txt_path = os.path.join(current_output_path, 'sparse-txt')
os.mkdir(sparse_txt_path)
subprocess.call([
os.path.join(args.colmap_path, 'colmap'), 'model_converter',
'--input_path', os.path.join(current_output_path, 'sparse'),
'--output_path', sparse_txt_path,
'--output_type', 'TXT'
])
|
74587
|
import numpy as np
import util
from linear_model import LinearModel
def main(train_path, eval_path, pred_path):
"""Problem 1(b): Logistic regression with Newton's Method.
Args:
train_path: Path to CSV file containing dataset for training.
eval_path: Path to CSV file containing dataset for evaluation.
pred_path: Path to save predictions.
"""
x_train, y_train = util.load_dataset(train_path, add_intercept=True)
# *** START CODE HERE ***
# *** END CODE HERE ***
class LogisticRegression(LinearModel):
"""Logistic regression with Newton's Method as the solver.
Example usage:
> clf = LogisticRegression()
> clf.fit(x_train, y_train)
> clf.predict(x_eval)
"""
def fit(self, x, y):
"""Run Newton's Method to minimize J(theta) for logistic regression.
Args:
x: Training example inputs. Shape (m, n).
y: Training example labels. Shape (m,).
"""
# *** START CODE HERE ***
# *** END CODE HERE ***
def predict(self, x):
"""Make a prediction given new inputs x.
Args:
x: Inputs of shape (m, n).
Returns:
Outputs of shape (m,).
"""
# *** START CODE HERE ***
# *** END CODE HERE ***
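# For reference only: a generic sketch of Newton's method for logistic regression, independent
# of the LinearModel interface above and not intended as the assignment solution. With
# h = sigmoid(X @ theta), the gradient is X.T @ (h - y) / m and the Hessian is
# X.T @ diag(h * (1 - h)) @ X / m; each iteration solves H @ delta = grad and subtracts delta.
def _newton_logreg_sketch(x, y, n_iter=10):
    m, n = x.shape
    theta = np.zeros(n)
    for _ in range(n_iter):
        h = 1.0 / (1.0 + np.exp(-x.dot(theta)))
        grad = x.T.dot(h - y) / m
        hess = (x.T * (h * (1.0 - h))).dot(x) / m
        theta -= np.linalg.solve(hess, grad)
    return theta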
|
74613
|
from lightning import Lightning
from lightning.types.base import Base
from functools import wraps
import inspect
def viztype(VizType):
# wrapper that passes inputs to cleaning function and creates viz
@wraps(VizType.clean)
def plotter(self, *args, **kwargs):
if kwargs['height'] is None and kwargs['width'] is None:
if self.size != 'full':
kwargs['width'] = SIZES[self.size]
if self.local_enabled:
if hasattr(VizType, '_local') and VizType._local == False:
name = VizType._func if hasattr(VizType, '_func') else VizType._name
print("Plots of type '%s' not yet supported in local mode" % name)
else:
viz = VizType._baseplot_local(VizType._name, *args, **kwargs)
return viz
else:
if not hasattr(self, 'session'):
self.create_session()
if VizType._name == 'plot':
if 'type' not in kwargs:
raise ValueError("Must specify a type for custom plots")
else:
type = kwargs['type']
del kwargs['type']
viz = VizType._baseplot(self.session, type, *args, **kwargs)
else:
viz = VizType._baseplot(self.session, VizType._name, *args, **kwargs)
self.session.visualizations.append(viz)
return viz
# get desired function name if different than plot type
if hasattr(VizType, '_func'):
func = VizType._func
else:
func = VizType._name
# crazy hack to give the dynamically generated function the correct signature
# based on: http://emptysqua.re/blog/copying-a-python-functions-signature/
# NOTE currently only handles functions with keyword arguments with defaults of None
options = {}
if hasattr(VizType, '_options'):
options = VizType._options
def parse(val):
if isinstance(val, str):
return "'" + val + "'"
else:
return val
formatted_options = ', '.join(['%s=%s' % (key, parse(value.get('default'))) for (key, value) in options.items()])
argspec = inspect.getargspec(VizType.clean)
formatted_args = inspect.formatargspec(*argspec)
fndef = 'lambda self, %s, %s: plotter(self,%s, %s)' \
% (formatted_args.lstrip('(').rstrip(')'),
formatted_options, formatted_args[1:].replace('=None', '').rstrip(')'),
', '.join('%s=%s' % (key, key) for key in options.keys()))
fake_fn = eval(fndef, {'plotter': plotter})
plotter = wraps(VizType.clean)(fake_fn)
# manually assign a plot-specific name (instead of 'clean')
plotter.__name__ = func
if plotter.__doc__:
plotter.__doc__ += Base._doc
# add plotter to class
setattr(Lightning, func, plotter)
return VizType
SIZES = {
'small': 400,
'medium': 600,
'large': 800,
}
|
74656
|
import os
import pytest
import shutil
import flask
from flask import Flask
import graphene
import pprint
from graphene.test import Client
from mock import patch
import responses
from gtmcore.auth.identity import get_identity_manager_class
from gtmcore.configuration import Configuration
from gtmcore.gitlib import RepoLocation
from gtmcore.inventory.branching import BranchManager
from gtmcore.inventory.inventory import InventoryManager
from gtmcore.files import FileOperations
from lmsrvcore.middleware import DataloaderMiddleware, error_middleware
from lmsrvlabbook.tests.fixtures import ContextMock, fixture_working_dir, _create_temp_work_dir, \
fixture_working_dir_lfs_disabled
from gtmcore.fixtures import helper_create_remote_repo, flush_redis_repo_cache
from lmsrvlabbook.api.query import LabbookQuery
from lmsrvlabbook.api.mutation import LabbookMutations
from gtmcore.workflows import LabbookWorkflow
UT_USERNAME = "default"
UT_LBNAME = "unittest-workflow-branch-1"
@pytest.fixture()
def mock_create_labbooks(fixture_working_dir):
# Create a labbook in the temporary directory
im = InventoryManager()
lb = im.create_labbook(UT_USERNAME, UT_USERNAME, UT_LBNAME, description="Cats labbook 1")
# Create a file in the dir
with open(os.path.join(fixture_working_dir[1], 'unittest-examplefile'), 'w') as sf:
sf.write("test data")
sf.seek(0)
FileOperations.insert_file(lb, 'code', sf.name)
assert os.path.isfile(os.path.join(lb.root_dir, 'code', 'unittest-examplefile'))
# Create test client
schema = graphene.Schema(query=LabbookQuery, mutation=LabbookMutations)
app = Flask("lmsrvlabbook")
app.config["LABMGR_CONFIG"] = config = Configuration()
app.config["ID_MGR_CLS"] = get_identity_manager_class(config)
with app.app_context():
flask.g.user_obj = get_identity_manager_class(config)(config).get_user_profile()
client = Client(schema, middleware=[DataloaderMiddleware(), error_middleware],
context_value=ContextMock())
yield lb, client, schema
shutil.rmtree(fixture_working_dir, ignore_errors=True)
class TestWorkflowsBranching(object):
def test_active_branch_name(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
q = f"""
{{
labbook(name: "{UT_LBNAME}", owner: "{UT_USERNAME}") {{
activeBranchName
workspaceBranchName
}}
}}
"""
r = client.execute(q)
assert 'errors' not in r
assert r['data']['labbook']['activeBranchName'] == bm.active_branch
assert r['data']['labbook']['workspaceBranchName'] == bm.workspace_branch
def test_available_branches(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
q = f"""
{{
labbook(name: "{UT_LBNAME}", owner: "{UT_USERNAME}") {{
branches {{
branchName
isLocal
isRemote
isActive
}}
}}
}}
"""
r = client.execute(q)
pprint.pprint(r)
assert 'errors' not in r
assert len(r['data']['labbook']['branches']) == 1
assert r['data']['labbook']['branches'][0]['branchName'] == bm.workspace_branch
assert r['data']['labbook']['branches'][0]['isLocal'] == True, "Should be local"
assert r['data']['labbook']['branches'][0]['isRemote'] == False, "There should be no remote branches"
assert r['data']['labbook']['branches'][0]['isActive'] == True
def test_query_mergeable_branches_from_main(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
bm.workon_branch(bm.workspace_branch)
b2 = bm.create_branch(f"tester2")
bm.workon_branch(bm.workspace_branch)
assert bm.active_branch == bm.workspace_branch
q = f"""
{{
labbook(name: "{UT_LBNAME}", owner: "{UT_USERNAME}") {{
branches {{
branchName
isMergeable
}}
}}
}}
"""
r = client.execute(q)
assert 'errors' not in r
assert len(r['data']['labbook']['branches']) == 3
assert r['data']['labbook']['branches'][0]['branchName'] == 'master'
assert r['data']['labbook']['branches'][0]['isMergeable'] == False
assert r['data']['labbook']['branches'][1]['branchName'] == 'tester1'
assert r['data']['labbook']['branches'][1]['isMergeable'] == True
assert r['data']['labbook']['branches'][2]['branchName'] == 'tester2'
assert r['data']['labbook']['branches'][2]['isMergeable'] == True
def test_query_mergeable_branches_from_feature_branch(self, mock_create_labbooks):
# Per current branch model, can only merge in workspace branch
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
bm.workon_branch(bm.workspace_branch)
b2 = bm.create_branch(f"tester2")
q = f"""
{{
labbook(name: "{UT_LBNAME}", owner: "{UT_USERNAME}") {{
workspaceBranchName
branches {{
branchName
isMergeable
}}
}}
}}
"""
r = client.execute(q)
assert 'errors' not in r
assert r['data']['labbook']['workspaceBranchName'] == bm.workspace_branch
branches = r['data']['labbook']['branches']
assert branches[0]['branchName'] == 'master'
assert branches[0]['isMergeable'] is True
assert branches[1]['branchName'] == 'tester1'
assert branches[1]['isMergeable'] is True
assert branches[2]['branchName'] == 'tester2'
assert branches[2]['isMergeable'] is False
def test_create_feature_branch_bad_name_fail(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
bad_branch_names = ['', '_', 'Über-bad', 'xxx-xxx' * 40, 'cats_99', 'bad-', '-', '-bad', 'bad--bad',
'bad---bad--bad-bad', 'Nope', 'Nope99', 'Nope-99', 'N&PE', 'n*ope', 'no;way', 'no:way',
'<nope>-not-a-branch', 'Robert") DROP TABLE Students; --', "no way not a branch",
''.join(chr(x) for x in range(0, 78)), ''.join(chr(x) for x in range(0, 255)),
chr(0) * 10, chr(0) * 10000]
for bad_name in bad_branch_names:
q = f"""
mutation makeFeatureBranch {{
createExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "{bad_name}"
}}) {{
newBranchName
}}
}}
"""
r = client.execute(q)
pprint.pprint(r)
assert 'errors' in r
assert bm.active_branch == bm.workspace_branch
assert lb.is_repo_clean
def test_create_feature_branch_from_feature_branch_fail(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
q = f"""
mutation makeFeatureBranch {{
createExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "valid-branch-name"
}}) {{
newBranchName
}}
}}
"""
r = client.execute(q)
pprint.pprint(r)
assert 'errors' in r
assert bm.active_branch == b1
assert lb.is_repo_clean
def test_create_feature_branch_success(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
bm.workon_branch(bm.workspace_branch)
q = f"""
mutation makeFeatureBranch {{
createExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "valid-branch-name-working1"
}}) {{
labbook{{
name
activeBranchName
branches {{
branchName
}}
}}
}}
}}
"""
r = client.execute(q)
assert 'errors' not in r
assert r['data']['createExperimentalBranch']['labbook']['activeBranchName'] \
== 'valid-branch-name-working1'
assert set([n['branchName'] for n in r['data']['createExperimentalBranch']['labbook']['branches']]) \
== set(['tester1', 'master', 'valid-branch-name-working1'])
assert lb.active_branch == 'valid-branch-name-working1'
assert lb.is_repo_clean
def test_create_feature_branch_success_update_description(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
bm.workon_branch(bm.workspace_branch)
q = f"""
mutation makeFeatureBranch {{
createExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "valid-branch-name-working1"
description: "Updated description"
}}) {{
labbook{{
name
description
branches {{
branchName
}}
activeBranchName
}}
}}
}}
"""
r = client.execute(q)
assert 'errors' not in r
assert r['data']['createExperimentalBranch']['labbook']['activeBranchName'] \
== 'valid-branch-name-working1'
assert r['data']['createExperimentalBranch']['labbook']['description'] \
== "Updated description"
assert bm.active_branch == 'valid-branch-name-working1'
assert lb.is_repo_clean
# Make sure activity record was created when description was changed
log_data = lb.git.log()
assert "_GTM_ACTIVITY_START_**\nmsg:Updated description of Project" in log_data[0]['message']
def test_delete_feature_branch_fail(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
q = f"""
mutation makeFeatureBranch {{
deleteExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "{b1}",
deleteLocal: true
}}) {{
success
}}
}}
"""
r = client.execute(q)
pprint.pprint(r)
# Cannot delete branch when it's the currently active branch
assert 'errors' in r
assert bm.active_branch == b1
assert lb.is_repo_clean
def test_delete_feature_branch_success(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
bm.workon_branch(bm.workspace_branch)
q = f"""
mutation makeFeatureBranch {{
deleteExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "{b1}",
deleteLocal: true
}}) {{
labbook {{
branches {{
branchName
}}
}}
}}
}}
"""
r = client.execute(q)
pprint.pprint(r)
# Deleting a branch that is not currently checked out should succeed
assert 'errors' not in r
assert bm.active_branch == bm.workspace_branch
assert lb.is_repo_clean
assert b1 not in bm.branches_local
def test_workon_feature_branch_bad_name_fail(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
bm.workon_branch(bm.workspace_branch)
q = f"""
mutation makeFeatureBranch {{
workonExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "{b1.replace('gm', '')}"
}}) {{
currentBranchName
}}
}}
"""
r = client.execute(q)
pprint.pprint(r)
# Cannot switch to a branch that does not exist
assert 'errors' in r
assert bm.active_branch == bm.workspace_branch
assert lb.is_repo_clean
def test_workon_feature_branch_success(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
b1 = bm.create_branch(f"tester1")
bm.workon_branch(bm.workspace_branch)
assert bm.active_branch == 'master'
q = f"""
mutation makeFeatureBranch {{
workonExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "{b1}"
}}) {{
labbook{{
name
description
branches {{
branchName
}}
activeBranchName
}}
}}
}}
"""
r = client.execute(q)
assert 'errors' not in r
assert r['data']['workonExperimentalBranch']['labbook']['activeBranchName'] \
== 'tester1'
ab = r['data']['workonExperimentalBranch']['labbook']['branches']
assert set([n['branchName'] for n in ab]) \
== set(['master', 'tester1'])
assert bm.active_branch == 'tester1'
assert lb.is_repo_clean
def test_merge_into_workspace_from_simple_success(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
og_hash = lb.git.commit_hash
b1 = bm.create_branch(f"test-branch")
FileOperations.makedir(lb, 'code/sillydir1', create_activity_record=True)
FileOperations.makedir(lb, 'code/sillydir2', create_activity_record=True)
branch_hash = lb.git.commit_hash
assert og_hash != branch_hash
bm.workon_branch(bm.workspace_branch)
assert lb.git.log()[0]['commit'] == og_hash
assert not os.path.exists(os.path.join(lb.root_dir, 'code/sillydir1'))
merge_q = f"""
mutation x {{
mergeFromBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
otherBranchName: "{b1}"
}}) {{
labbook{{
name
description
activeBranchName
}}
}}
}}
"""
r = client.execute(merge_q)
assert 'errors' not in r
assert r['data']['mergeFromBranch']['labbook']['activeBranchName'] \
== 'master'
assert lb.active_branch == bm.workspace_branch
assert os.path.exists(os.path.join(lb.root_dir, 'code/sillydir1'))
assert lb.is_repo_clean
def test_merge_into_feature_from_workspace_simple_success(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
bm = BranchManager(lb, username=UT_USERNAME)
og_hash = lb.git.commit_hash
b1 = bm.create_branch(f"test-branch")
bm.workon_branch(bm.workspace_branch)
assert lb.active_branch == bm.workspace_branch
og2_hash = lb.git.commit_hash
assert lb.git.commit_hash == og_hash
FileOperations.makedir(lb, 'code/main-branch-dir1', create_activity_record=True)
FileOperations.makedir(lb, 'code/main-branch-dir2', create_activity_record=True)
next_main_hash = lb.git.commit_hash
assert og_hash != next_main_hash
bm.workon_branch(b1)
assert not os.path.exists(os.path.join(lb.root_dir, 'code/main-branch-dir1'))
merge_q = f"""
mutation x {{
mergeFromBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
otherBranchName: "{bm.workspace_branch}"
}}) {{
labbook{{
name
description
activeBranchName
}}
}}
}}
"""
r = client.execute(merge_q)
assert 'errors' not in r
assert r['data']['mergeFromBranch']['labbook']['activeBranchName'] == 'test-branch'
assert r['data']['mergeFromBranch']['labbook']['name'] == 'unittest-workflow-branch-1'
assert lb.active_branch == b1
assert os.path.exists(os.path.join(lb.root_dir, 'code/main-branch-dir1'))
assert lb.is_repo_clean
def test_conflicted_merge_from_no_force_fail(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
with open('/tmp/s1.txt', 'w') as s1:
s1.write('original-file\ndata')
FileOperations.insert_file(lb, section='code', src_file=s1.name)
bm = BranchManager(lb, username=UT_USERNAME)
nb = bm.create_branch(f'new-branch')
with open('/tmp/s1.txt', 'w') as s1:
s1.write('branch-conflict-data')
FileOperations.insert_file(lb, section='code', src_file=s1.name)
bm.workon_branch(bm.workspace_branch)
with open('/tmp/s1.txt', 'w') as s1:
s1.write('mainline-conflict-data')
FileOperations.insert_file(lb, section='code', src_file=s1.name)
merge_q = f"""
mutation x {{
mergeFromBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
otherBranchName: "{nb}"
}}) {{
labbook{{
name
description
activeBranchName
}}
}}
}}
"""
r = client.execute(merge_q)
assert 'errors' in r
assert 'Merge conflict' in r['errors'][0]['message']
def test_conflicted_merge_from_force_success(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
with open('/tmp/s1.txt', 'w') as s1:
s1.write('original-file\ndata')
FileOperations.insert_file(lb, section='code', src_file=s1.name)
bm = BranchManager(lb, username=UT_USERNAME)
nb = bm.create_branch(f'new-branch')
with open('/tmp/s1.txt', 'w') as s1:
s1.write('branch-conflict-data')
FileOperations.insert_file(lb, section='code', src_file=s1.name)
bm.workon_branch(bm.workspace_branch)
with open('/tmp/s1.txt', 'w') as s1:
s1.write('mainline-conflict-data')
FileOperations.insert_file(lb, section='code', src_file=s1.name)
merge_q = f"""
mutation x {{
mergeFromBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
otherBranchName: "{nb}",
overrideMethod: "theirs"
}}) {{
labbook{{
name
description
activeBranchName
}}
}}
}}
"""
r = client.execute(merge_q)
assert 'errors' not in r
assert r['data']['mergeFromBranch']['labbook']['activeBranchName'] == 'master'
def test_reflect_deleted_files_on_merge_in(self, mock_create_labbooks):
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
with open('/tmp/s1.txt', 'w') as s1:
s1.write('original-file\ndata')
FileOperations.insert_file(lb, section='code', src_file=s1.name)
bm = BranchManager(lb, username=UT_USERNAME)
nb = bm.create_branch(f'new-branch')
assert os.path.exists(os.path.join(lb.root_dir, 'code', 's1.txt'))
FileOperations.delete_files(lb, 'code', ['s1.txt'])
assert lb.is_repo_clean
assert not os.path.exists(os.path.join(lb.root_dir, 'code', 's1.txt'))
bm.workon_branch(bm.workspace_branch)
assert os.path.exists(os.path.join(lb.root_dir, 'code', 's1.txt'))
merge_q = f"""
mutation x {{
mergeFromBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
otherBranchName: "{nb}"
}}) {{
labbook{{
name
description
activeBranchName
}}
}}
}}
"""
r = client.execute(merge_q)
assert 'errors' not in r
assert r['data']['mergeFromBranch']['labbook']['activeBranchName'] == 'master'
assert not os.path.exists(os.path.join(lb.root_dir, 'code', 's1.txt'))
def test_create_rollback_branch_remove_linked_dataset(self, mock_create_labbooks):
""" test creating a rollback branch that removes a linked dataset"""
flush_redis_repo_cache()
lb, client = mock_create_labbooks[0], mock_create_labbooks[1]
im = InventoryManager()
ds = im.create_dataset(UT_USERNAME, UT_USERNAME, 'test-ds', storage_type='gigantum_object_v1')
rollback_to = lb.git.commit_hash
# Link dataset to project
im.link_dataset_to_labbook(f"{ds.root_dir}/.git", UT_USERNAME, ds.name, lb, UT_USERNAME)
dataset_dir = os.path.join(lb.root_dir, '.gigantum', 'datasets', UT_USERNAME, 'test-ds')
assert os.path.exists(dataset_dir) is True
q = f"""
mutation makeFeatureBranch {{
createExperimentalBranch(input: {{
owner: "{UT_USERNAME}",
labbookName: "{UT_LBNAME}",
branchName: "rollback-branch",
revision: "{rollback_to}",
description: "testing rollback",
}}) {{
labbook{{
name
activeBranchName
description
branches {{
branchName
}}
linkedDatasets{{
name
}}
}}
}}
}}
"""
r = client.execute(q)
assert 'errors' not in r
assert r['data']['createExperimentalBranch']['labbook']['activeBranchName'] == 'rollback-branch'
assert r['data']['createExperimentalBranch']['labbook']['description'] == "testing rollback"
assert r['data']['createExperimentalBranch']['labbook']['linkedDatasets'] == []
assert lb.is_repo_clean
assert os.path.exists(dataset_dir) is False
@patch('gtmcore.workflows.gitworkflows_utils.create_remote_gitlab_repo', new=helper_create_remote_repo)
def test_commits_ahead_behind(self, fixture_working_dir_lfs_disabled):
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, 'https://test.gigantum.com/api/v1/',
json={'data': {'additionalCredentials': {'gitServiceToken': '<PASSWORD>'}}}, status=200)
client = fixture_working_dir_lfs_disabled[2]
im = InventoryManager()
lb = im.create_labbook(UT_USERNAME, UT_USERNAME, UT_LBNAME, description="tester")
bm = BranchManager(lb, username=UT_USERNAME)
bm.create_branch('new-branch-1')
bm.create_branch('new-branch-2')
bm.workon_branch('master')
q = f"""
{{
labbook(name: "{UT_LBNAME}", owner: "{UT_USERNAME}") {{
branches {{
branchName
isLocal
isRemote
isActive
commitsAhead
commitsBehind
}}
}}
}}
"""
r = client.execute(q)
assert 'errors' not in r
assert len(r['data']['labbook']['branches']) == 3
assert r['data']['labbook']['branches'][0]['branchName'] == 'master'
assert r['data']['labbook']['branches'][0]['isLocal'] is True, "Should be local"
assert r['data']['labbook']['branches'][0]['isRemote'] is False, "not published yet"
assert r['data']['labbook']['branches'][0]['isActive'] is True
assert r['data']['labbook']['branches'][0]['commitsAhead'] == 0
assert r['data']['labbook']['branches'][0]['commitsBehind'] == 0
# Make a remote change!
username = 'default'
wf = LabbookWorkflow(lb)
wf.publish(username=username)
other_user = 'other-test-user'
remote = RepoLocation(wf.remote, other_user)
wf_other = LabbookWorkflow.import_from_remote(remote, username=other_user)
with open(os.path.join(wf_other.repository.root_dir, 'testfile'), 'w') as f:
f.write('filedata')
wf_other.repository.sweep_uncommitted_changes()
wf_other.sync(username=other_user)
r = client.execute(q)
assert 'errors' not in r
assert len(r['data']['labbook']['branches']) == 3
assert r['data']['labbook']['branches'][0]['branchName'] == 'master'
assert r['data']['labbook']['branches'][0]['isLocal'] is True, "Should be local"
assert r['data']['labbook']['branches'][0]['isRemote'] is True, "There should be a remote"
assert r['data']['labbook']['branches'][0]['isActive'] is True
assert r['data']['labbook']['branches'][0]['commitsAhead'] == 0
assert r['data']['labbook']['branches'][0]['commitsBehind'] == 1
# Make a local change!
lb.write_readme("blah")
r = client.execute(q)
assert 'errors' not in r
assert len(r['data']['labbook']['branches']) == 3
assert r['data']['labbook']['branches'][0]['branchName'] == 'master'
assert r['data']['labbook']['branches'][0]['isLocal'] is True, "Should be local"
assert r['data']['labbook']['branches'][0]['isRemote'] is True, "There should be a remote"
assert r['data']['labbook']['branches'][0]['isActive'] is True
assert r['data']['labbook']['branches'][0]['commitsAhead'] == 1
assert r['data']['labbook']['branches'][0]['commitsBehind'] == 1
# Sync
wf.sync(username=username)
r = client.execute(q)
assert 'errors' not in r
assert len(r['data']['labbook']['branches']) == 3
assert r['data']['labbook']['branches'][0]['branchName'] == 'master'
assert r['data']['labbook']['branches'][0]['isLocal'] is True, "Should be local"
assert r['data']['labbook']['branches'][0]['isRemote'] is True, "There should be a remote"
assert r['data']['labbook']['branches'][0]['isActive'] is True
assert r['data']['labbook']['branches'][0]['commitsAhead'] == 0
assert r['data']['labbook']['branches'][0]['commitsBehind'] == 0
|
74677
|
import os
import simplejson as json
from getpass import getpass
from pytezos.crypto import Key
class Keychain:
def __init__(self, path='~/.tezos-client/secret_keys'):
self._path = os.path.expanduser(path)
self._secret_keys = list()
self._last_modified = 0
def reload(self):
last_modified = os.path.getmtime(self._path)
if last_modified > self._last_modified:
self._last_modified = last_modified
with open(self._path, 'r') as f:
self._secret_keys = json.load(f)
def get_key(self, name) -> Key:
self.reload()
value = next(item['value'] for item in self._secret_keys if item['name'] == name)
prefix, key = value.split(':', maxsplit=1)
if prefix == 'encrypted':
password = getpass(f'Please, enter passphrase for `{name}`:\n')
key = Key(key, passphrase=password)
else:
key = Key(key)
return key
def list_keys(self) -> list:
self.reload()
def format_item(item: dict):
prefix, key = item['value'].split(':')
return dict(
name=item['name'],
type=prefix,
curve={'ed': 'ed25519', 'sp': 'secp256k1', 'p2': 'p256'}[key[:2]]
)
return list(map(format_item, self._secret_keys))
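# A usage sketch with a throwaway secret_keys file. The key value is a placeholder in the
# tezos-client "<kind>:<key>" format and is only used to exercise list_keys(), which never
# decodes the key material itself.
if __name__ == '__main__':
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
        json.dump([{'name': 'alice', 'value': 'unencrypted:edsk_placeholder'}], f)
        path = f.name

    keychain = Keychain(path=path)
    print(keychain.list_keys())  # [{'name': 'alice', 'type': 'unencrypted', 'curve': 'ed25519'}]
    os.remove(path)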
|
74696
|
from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object
from topaz.modules.ffi.function import W_FFIFunctionObject
from rpython.rlib import jit
class W_VariadicInvokerObject(W_Object):
classdef = ClassDef('VariadicInvoker', W_Object.classdef)
def __init__(self, space):
W_Object.__init__(self, space)
self.w_info = None
self.w_handle = None
@classdef.singleton_method('allocate')
def singleton_method_allocate(self, space, args_w):
return W_VariadicInvokerObject(space)
@classdef.method('initialize')
def method_initialize(self, space, w_handle, w_arg_types,
w_ret_type, w_options=None):
self.w_ret_type = w_ret_type
self.w_options = w_options
self.w_handle = w_handle
if w_options is None:
w_type_map = space.newhash()
else:
w_key = space.newsymbol('type_map')
w_type_map = space.send(w_options, '[]', [w_key])
space.send(self, 'init', [w_arg_types, w_type_map])
@classdef.method('invoke', arg_values_w='array')
def method_invoke(self, space, w_arg_types, arg_values_w):
w_func_cls = space.getclassfor(W_FFIFunctionObject)
w_func = space.send(
w_func_cls, 'new',
[self.w_ret_type, w_arg_types, self.w_handle, self.w_options])
return self._dli_call(space, w_func, arg_values_w)
@jit.dont_look_inside
def _dli_call(self, space, w_func, arg_values_w):
# XXX we are missing argument promotion for the variadic arguments here
# see
# http://stackoverflow.com/questions/1255775/default-argument-promotions-in-c-function-calls
return space.send(w_func, 'call', arg_values_w)
|
74697
|
from recolor import Core
def main():
# Simulating Protanopia with diagnosed degree of 0.9 and saving the image to file.
Core.simulate(input_path='Examples_Check/ex_original.jpg',
return_type='save',
save_path='Examples_Check/ex_simulate_protanopia.png',
simulate_type='protanopia',
simulate_degree_primary=0.9)
# Simulating deuteranopia with diagnosed degree of 0.9 and saving the image to file.
Core.simulate(input_path='Examples_Check/ex_original.jpg',
return_type='save',
save_path='Examples_Check/ex_simulate_deuteranopia.png',
simulate_type='deuteranopia',
simulate_degree_primary=0.9)
# Simulating Tritanopia with diagnosed degree of 0.9 and saving the image to file.
Core.simulate(input_path='Examples_Check/ex_original.jpg',
return_type='save',
save_path='Examples_Check/ex_simulate_tritanopia.png',
simulate_type='tritanopia',
simulate_degree_primary=0.9)
# Simulating Hybrid (Protanomaly + Deuteranomaly) with diagnosed degrees of 0.5 and 0.5 and saving the image to file.
Core.simulate(input_path='Examples_Check/ex_original.jpg',
return_type='save',
save_path='Examples_Check/ex_simulate_hybrid.png',
simulate_type='hybrid',
simulate_degree_primary=0.5,
simulate_degree_sec=0.5)
    # Correcting Image for Protanopia with diagnosed degree of 0.9 and saving the image to file.
Core.correct(input_path='Examples_Check/ex_original.jpg',
return_type='save',
save_path='Examples_Check/ex_corrected_protanopia.png',
protanopia_degree=0.9,
deuteranopia_degree=0.0)
# Also simulate the corrected image to see difference.
Core.simulate(input_path='Examples_Check/ex_corrected_protanopia.png',
return_type='save',
save_path='Examples_Check/ex_simulate_corrected_protanopia.png',
simulate_type='protanopia',
simulate_degree_primary=0.9)
# Correcting Image for deuteranopia with diagnosed degree of 1.0 and saving the image to file.
Core.correct(input_path='Examples_Check/ex_original.jpg',
return_type='save',
save_path='Examples_Check/ex_corrected_deuteranopia.png',
protanopia_degree=0.0,
deuteranopia_degree=1.0)
# Also simulate the corrected image to see difference.
Core.simulate(input_path='Examples_Check/ex_corrected_deuteranopia.png',
return_type='save',
save_path='Examples_Check/ex_simulate_corrected_deuteranopia.png',
simulate_type='deuteranopia',
simulate_degree_primary=0.9)
    # Correcting Image for Hybrid with diagnosed degree of 0.5 for both protanopia and
    # deuteranopia and saving the image to file.
Core.correct(input_path='Examples_Check/ex_original.jpg',
return_type='save',
save_path='Examples_Check/ex_corrected_hybrid.png',
protanopia_degree=0.5,
deuteranopia_degree=0.5)
# You can also use different return types and get numpy array or PIL.Image for further processing.
# See recolor.py
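    # Illustrative sketch only -- the exact return_type strings accepted by recolor.py
    # are an assumption here, not confirmed by this script:
    #   arr = Core.simulate(input_path='Examples_Check/ex_original.jpg',
    #                       return_type='np.array',
    #                       simulate_type='protanopia',
    #                       simulate_degree_primary=0.9)
    #   # arr could then be post-processed further, e.g. with numpy or PIL.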
return
if __name__ == '__main__':
main()
|
74703
|
from aydin.it.normalisers.base import NormaliserBase
class IdentityNormaliser(NormaliserBase):
"""Identity Normaliser"""
def __init__(self, **kwargs):
"""Constructs a normalisers"""
super().__init__(**kwargs)
def calibrate(self, array):
"""Calibrate method
Parameters
----------
array : numpy.ndarray
"""
self.original_dtype = array.dtype
return None, None
|
74713
|
from django.test import TestCase
from django_hats.bootstrap import Bootstrapper
class RolesTestCase(TestCase):
def setUp(self, *args, **kwargs):
'''Clears `Roles` cache for testing.
'''
for role in Bootstrapper.get_roles():
setattr(role, 'group', None)
return super(RolesTestCase, self).setUp(*args, **kwargs)
|
74726
|
def create_500M_file(name):
native.genrule(
name = name + "_target",
outs = [name],
output_to_bindir = 1,
cmd = "truncate -s 500M $@",
)
|
74739
|
from fastapi import FastAPI
from models import User, db
app = FastAPI()
db.init_app(app)
@app.get("/")
async def root():
    # simple greeting endpoint (the user count is served by /users below)
return {"hello": "Hello!"}
@app.get("/users")
async def users():
# count number of users in DB
return {"count_users": await db.func.count(User.id).gino.scalar()}
|
74754
|
import numpy as np
import cPickle
import os
import pdb
import cv2
def unpickle(file):
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def load_data(train_path,order,nb_groups, nb_cl, nb_val,SubMean = False):
xs = []
ys = []
for j in range(1):
d = unpickle(train_path+'cifar-100-python/train')
x = d['data']
y = d['fine_labels']
xs.append(x)
ys.append(y)
d = unpickle(train_path + 'cifar-100-python/test')
xs.append(d['data'])
ys.append(d['fine_labels'])
x = np.concatenate(xs)/np.float32(255)
y = np.concatenate(ys)
#x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))
x = x.reshape((x.shape[0], 3,32, 32)).transpose(0,2,3,1)
#x = np.transpose(x,(0,2,3,1))
#pdb.set_trace()
#cv2.imwrite("1.jpg",cv2.cvtColor(x[3,:,:,:]*255, cv2.COLOR_RGB2BGR))
#.transpose(0,3,1,2)
# subtract per-pixel mean
pixel_mean = np.mean(x[0:50000],axis=0)
#np.save('cifar_mean.npy',pixel_mean)
#pdb.set_trace()
if SubMean == True:
x -= pixel_mean
#pdb.set_trace()
# Create Train/Validation set
eff_samples_cl = 500-nb_val
X_train = np.zeros((eff_samples_cl*100,32, 32,3))
Y_train = np.zeros(eff_samples_cl*100)
X_valid = np.zeros((nb_val*100,32, 32,3))
Y_valid = np.zeros(nb_val*100)
for i in range(100):
index_y=np.where(y[0:50000]==i)[0]
np.random.shuffle(index_y)
X_train[i*eff_samples_cl:(i+1)*eff_samples_cl] = x[index_y[0:eff_samples_cl],:,:,:]
Y_train[i*eff_samples_cl:(i+1)*eff_samples_cl] = y[index_y[0:eff_samples_cl]]
X_valid[i*nb_val:(i+1)*nb_val] = x[index_y[eff_samples_cl:500],:,:,:]
Y_valid[i*nb_val:(i+1)*nb_val] = y[index_y[eff_samples_cl:500]]
X_test = x[50000:,:,:,:]
Y_test = y[50000:]
files_train = []
train_labels = []
files_valid = []
valid_labels = []
files_test = []
test_labels = []
for _ in range(nb_groups):
files_train.append([])
train_labels.append([])
files_valid.append([])
valid_labels.append([])
files_test.append([])
test_labels.append([])
for i in range(nb_groups):
for i2 in range(nb_cl):
labels_old = Y_train
#pdb.set_trace()
tmp_ind=np.where(labels_old == order[nb_cl*i+i2])[0]
np.random.shuffle(tmp_ind)
files_train[i].extend(X_train[tmp_ind[0:len(tmp_ind)]])
train_labels[i].extend(Y_train[tmp_ind[0:len(tmp_ind)]])
labels_old = Y_valid
tmp_ind=np.where(labels_old == order[nb_cl*i+i2])[0]
np.random.shuffle(tmp_ind)
files_valid[i].extend(X_valid[tmp_ind[0:len(tmp_ind)]])
valid_labels[i].extend(Y_valid[tmp_ind[0:len(tmp_ind)]])
labels_old = Y_test
tmp_ind=np.where(labels_old == order[nb_cl*i+i2])[0]
np.random.shuffle(tmp_ind)
files_test[i].extend(X_test[tmp_ind[0:len(tmp_ind)]])
test_labels[i].extend(Y_test[tmp_ind[0:len(tmp_ind)]])
#pdb.set_trace()
return files_train,train_labels,files_valid,valid_labels,files_test,test_labels
def aug(batch):
# as in paper :
# pad feature arrays with 4 pixels on each side
# and do random cropping of 32x32
#pdb.set_trace()
padded = np.pad(batch,((0,0),(4,4),(4,4),(0,0)),mode='constant')
random_cropped = np.zeros(batch.shape, dtype=np.float32)
crops = np.random.random_integers(0,high=8,size=(batch.shape[0],2))
for r in range(batch.shape[0]):
# Cropping and possible flipping
#if (np.random.randint(2) > 0):
random_cropped[r,:,:,:] = padded[r,crops[r,0]:(crops[r,0]+32),crops[r,1]:(crops[r,1]+32),:]
#else:
#random_cropped[r,:,:,:] = padded[r,crops[r,0]:(crops[r,0]+32),crops[r,1]:(crops[r,1]+32),:][:,:,::-1]
inp_exc = random_cropped
return inp_exc
def balanced_subsample(x,y,subsample_size=1.0):
class_xs = []
min_elems = None
#pdb.set_trace()
for yi in np.unique(y):
elems = x[(y == yi)]
class_xs.append((yi, elems))
        if min_elems is None or elems.shape[0] < min_elems:
min_elems = elems.shape[0]
use_elems = min_elems
if subsample_size < 1:
use_elems = int(min_elems*subsample_size)
xs = []
ys = []
#pdb.set_trace()
for ci,this_xs in class_xs:
if len(this_xs) > use_elems:
np.random.shuffle(this_xs)
x_ = this_xs[:use_elems]
y_ = np.empty(use_elems)
y_.fill(ci)
xs.append(x_)
ys.append(y_)
xs = np.concatenate(xs)
ys = np.concatenate(ys)
return xs,ys
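# Illustrative usage sketch (paths and split sizes are assumptions, not part of the
# original script); `order` is a permutation of the 100 CIFAR-100 class labels and
# nb_groups * nb_cl should equal 100:
#   order = np.arange(100)
#   np.random.shuffle(order)
#   files_train, train_labels, files_valid, valid_labels, files_test, test_labels = \
#       load_data('./data/', order, nb_groups=10, nb_cl=10, nb_val=50)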
|
74768
|
from wayback_machine_archiver.archiver import format_archive_url
def test_archive_org():
BASE = "https://web.archive.org/save/"
URLS = (
"https://alexgude.com",
"http://charles.uno",
)
for url in URLS:
assert BASE + url == format_archive_url(url)
|
74773
|
from kiwoom import config
from kiwoom.config import valid_event
from functools import wraps
from textwrap import dedent
from types import LambdaType
from inspect import (
getattr_static,
ismethod,
isfunction,
isclass,
ismodule
)
class Connector:
"""
Decorator class for mapping empty events to user implementing slots.
This class helps mapping events to specific slots. 'Kiwoom' instance
contains mapping information which had been set from two methods.
1) Kiwoom.connect(event, signal, slot)
2) Kiwoom.set_connect_hook(event, param)
This decorator does not contain those mapping information but only
uses the one defined in instance. This is because decorator should
work on various contexts. The first parameter of wrapper function
in __call__ method, i.e. api, is 'self' argument for Kiwoom object.
This class has three main methods.
1) Connector.map(event)
- act as a decorator for pre-defined Kiwoom events
- below is the usage example
class Kiwoom(API):
...
@map
def on_event_connect(self, err_code):
pass
...
2) Connector.mute(bool) # static method
- warning message can be turned on/off.
3) Connector.connectable(fn) # static method
      - Checks whether the given fn is a method bound to an instance.
      - If fn is not a bound method, it should be static or a lambda.
      - Bound methods are important for handling continuous information.
"""
# Class variable
nargs = {
'on_event_connect': 1,
'on_receive_msg': 4,
'on_receive_tr_data': 5,
'on_receive_real_data': 3,
'on_receive_chejan_data': 3,
'on_receive_condition_ver': 2,
'on_receive_tr_condition': 5,
'on_receive_real_condition': 4
}
def __init__(self):
# If no hook is set, dic[event] returns signal/slot.
# If hook is set, dic[event][key] returns signal/slot.
self._hooks = dict()
self._signals = dict()
self._slots = dict()
self._indices = dict()
def signal(self, event, key=None):
"""
Returns signal methods connected to the event.
If signal and slot are connected to a specific event by Kiwoom.connect() method,
then this method returns the connected signal method. If signal is not connected,
or wrong key is given, raise a KeyError.
'key' is needed when hook is set by Kiwoom.set_connect_hook(). 'key' is set to
be the name of signal method by default unless another string is set on purpose
when connecting.
When requesting data to server is needed, specifically if more data is available,
Kiwoom.signal() returns the exact signal method that can request more data.
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:param key: str, optional
If hook is set by Kiwoom.set_connect_hook() method and signal is connected
by Kiwoom.connect(), then key is needed. 'key' is set to be name of the
signal method by default unless another 'key' is given when connecting.
:return: method
Signal method connected to the given event. If wrong event, returns None.
"""
if not valid_event(event):
return None
if not self.connect_hook(event):
return self._signals[event]
return self._signals[event][key]
def slot(self, event, key=None):
"""
Returns slot methods connected to the event.
If signal and slot are connected to specific event by Kiwoom.connect() method,
then this method returns the connected slot method. If slot is not connected,
or wrong key is given, this raises a KeyError.
'key' is needed when hook is set by Kiwoom.set_connect_hook(). 'key' is set to
be the name of slot method by default unless another string is set on purpose
when connecting.
When an event is called, Kiwoom.slot() returns the exact slot method that can
handle data received from the event. This method is used in Connector decorator
that wraps events to execute connected slot with the event.
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:param key: str, optional
If hook is set by Kiwoom.set_connect_hook() method and slot is connected
by Kiwoom.connect(), then key is needed. 'key' is set to be name of the
slot method by default unless another 'key' is given when connecting.
:return: method or None
Slot method connected to the given event. If wrong event, returns None.
"""
if not valid_event(event):
return None
if not self.connect_hook(event):
return self._slots[event]
return self._slots[event][key]
def connect(self, event, signal=None, slot=None, key=None):
"""
Connects signals and slots to one of pre-defined events.
Information saved in this method is used by decorator @Connector() which wraps
the events and automatically calls the right slot connected to the events. In
addition to the decorator, Kiwoom.signal(event, key) and Kiwoom.slot(event, key)
returns the one connected to the event.
1) If no hook is set on the event, then the connected signal/slot can be retrieved
by Kiwoom.signal(event) and Kiwoom.slot(event). There is no need to use key.
2) If hook is set by Kiwoom.set_connect_hook() on the event, in which case there
needs multiple slots to connect on one event, then connection requires a key
which is to be the name of signal/slot methods by default.
        The convention in this module is to give related signal and slot methods the
        same name, which makes the code easier to manage and develop. Use the 'key'
        arg only when there is a special need. The connected signal/slot can be
retrieved by Kiwoom.signal(event, key='name') and Kiwoom.slot(event, key='name').
Here 'name' can be a method name or special 'key' used in this method.
This method checks whether or not given signal/slot can be called without any
problem. If given method is not bounded to an instance, method should be static
or lambda function. This is because normally 'self' argument is needed to call
methods, therefore method must be bounded to an instance unless given method is
a function.
Please see tutorials example on the link below.
https://github.com/breadum/kiwoom/blob/main/tutorials/4.%20TR%20Data.py
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:param signal: method, optional
A method that requests to the server
:param slot: method, optional
A method that reacts the server's response
:param key: str, optional
Key is needed only if hook is set by Kiwoom.set_connect_hook() method.
Key is set to be name of the given signal and/or slot method by default.
If key is given other than method name, the connected signal can be
            retrieved by Kiwoom.signal(event, key) and slot by Kiwoom.slot(event, key)
"""
valid = False
connectable = Connector.connectable
if not valid_event(event):
return
# Directly connect slot to the event
if not self.connect_hook(event):
# Key can't be used here
if key is not None:
raise RuntimeError(
"Key can't be used. Remove key argument or Try to set_connect_hook() first."
)
elif connectable(signal):
if connectable(slot):
valid = True
self._signals[event] = signal
self._slots[event] = slot
elif connectable(slot):
valid = True
self._slots[event] = slot
        # Connect signal/slot to the event when a hook is set
else:
if connectable(signal):
if connectable(slot):
valid = True
# Key other than method's name
if key is not None:
self._signals[event][key] = signal
self._slots[event][key] = slot
# Default key is method's name
else:
self._signals[event][getattr(signal, '__name__')] = signal
self._slots[event][getattr(slot, '__name__')] = slot
elif connectable(slot):
valid = True
if key is not None:
self._slots[event][key] = slot
else:
self._slots[event][getattr(slot, '__name__')] = slot
# Nothing is connected
if not valid:
raise RuntimeError(f"Unsupported combination of inputs. Please read below.\n\n{self.connect.__doc__}")
def connect_hook(self, event):
"""
Returns whether a hook is set for given event.
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:return: bool
"""
if event in self._hooks:
return True
return False
def set_connect_hook(self, event, param):
"""
Set parameter defined in event as a hook to find the right slot when event is called.
When an event needs multiple slots to connect, depending on specific tasks, set
a hook(key) to select which slot to map. The hook must be one of the parameters
in the definition of the event method. Parameters can be found by help built-in
function or Kiwoom.api_arg_spec(event). This raises a KeyError if given param is
not defined in event method.
If hook is set to the given parameter, argument passed into the parameter when
the event is called, is going to be a key to connect event, signal and slot.
Convention is that the name of signal and slot that deal with the related task
is recommended to be the same, so that 'key' is set to be the method name of
signal and slot by default. See examples on the tutorials link below.
https://github.com/breadum/kiwoom/blob/main/tutorials/5.%20TR%20Data.py
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:param param: str
Parameter name defined in given event. To see all parameters to event,
use Kiwoom.api_arg_spec(event) method or help(...) built-in function.
"""
if not valid_event(event):
return
# To check given arg is valid
from kiwoom import Kiwoom # lazy import
args = Kiwoom.api_arg_spec(event)
if param not in args:
raise KeyError(f"{param} is not valid.\nSelect one of {args}.")
# To set connect hook and its index in args
self._hooks[event] = param
self._indices[event] = list(args.keys()).index(param) - 1 # except 'self'
# Initialize structure to get signal/slot method by dic[event][key]
self._signals[event] = dict()
self._slots[event] = dict()
def get_connect_hook(self, event):
"""
Returns a hook (i.e. name of parameter) set in given event.
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:return: str or None
If exists, returns hook in string else None. If not a valid event is given,
this returns None.
"""
if event not in self._hooks:
return None
return self._hooks[event]
def remove_connect_hook(self, event):
"""
Remove hook which is set in given event if exists.
This method removes all information of signals and slots connected to given
event as well as hook. If hook of given event does not exist, this raises
a KeyError.
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
"""
del self._hooks[event]
del self._signals[event]
del self._slots[event]
del self._indices[event]
def get_hook_index(self, event):
"""
Returns index of hook in method arguments
:param event: str
:return: int
"""
if event in self._indices:
return self._indices[event]
return None
@staticmethod
def map(ehandler):
"""
Decorator method to call slot method when event connected to slot has been called.
When event has been called, this decorator method is called with event method as arg,
'ehandler'. Then wrapper function wraps the event and will be executed.
Inside the wrapper, it takes all args from the event. The First arg is 'self' which is
an instance of Kiwoom class. The rest of args depends on which event has been called.
Firstly, execute event handler which is initially an empty method in the module. But
this process is needed for when an empty default method is overridden. Then, find the
right slot connected to the event. If found, execute it with the same args forwarded
from event. If not, just print a warning message. This message can be turned on/off.
Usage example
>> class Kiwoom(API):
>> ...
>> @map # decorator
>> def on_event_connect(self, err_code):
>> pass # empty event
>> ...
:param ehandler: method
One of pre-defined event handlers, see kiwoom.config.EVENTS.
:return: function
Wrapper function that executes a slot method connected to the event.
"""
@wraps(ehandler) # keep docstring of event handler
def wrapper(api, *args):
# Variables
event = getattr(ehandler, '__name__')
idx = api.get_hook_index(event)
hook = api.get_connect_hook(event)
args = args[:Connector.nargs[event]]
# To execute the default event handler in case of overriding
ehandler(api, *args)
# To find connected slot
try:
# If hook is set on the event, then key becomes arg that corresponds to the hook
# ex) if hook is rq_name for on_receive_tr_data, then key becomes arg passed into rq_name
key = args[idx] if hook else None
# To retrieve the right slot
slot = api.slot(event, key)
except KeyError:
if not config.MUTE:
msg = dedent(
f"""
kiwoom.{event}({', '.join(map(str, args))}) has been called.
But the event handler, '{event}', is not connected to any slot.
Please try to connect event and slot by using kiwoom.connect() method.
>> api.connect('{event}', slot=slot_method)
This warning message can disappear by the following.
>> kiwoom.config.MUTE = True # global variable
"""
)
print(msg)
# To prevent executing slot that does not exist, just return
return
# Execute the connected slot
slot(*args)
# Return wrapper function to decorate
return wrapper
@classmethod
def mute(cls, bool):
"""
Class method to turn on/off printing warning message of no connected slot to an event.
Usage example
>> Connector.mute(True/False)
:param bool: bool
If True, no warning message else warning.
"""
config.MUTE = bool
@staticmethod
def connectable(fn):
"""
Static function that checks given fn is callable and bounded method.
When event is called, a slot mapped to the event is to be called by decorator.
If fn is not a static function nor lambda function, fn needs 'self' argument to
work correctly. This is why fn should be bounded method to instance object.
Bounded method contains 'self' argument by itself. This method is used in
Kiwoom.connect(event, signal, slot) to check validity before making connections.
When given fn is not valid to connect, this function raises TypeError.
:param fn: method, function or None
Any callables to be tested. None is exceptionally accepted.
:return: bool
If a callable can be executed without any problem, returns True.
"""
# None by default False
if fn is None:
return False
# Instance method, Class method
if ismethod(fn):
return True
# Normal function, Class function, Lambda function
if isfunction(fn):
# Lambda function
if isinstance(fn, LambdaType):
return True
qname = getattr(fn, '__qualname__')
if '.' in qname:
idx = qname.rfind('.')
prefix, fname = qname[:idx], qname[idx + 1:]
obj = eval(prefix)
# Normal function
if ismodule(obj):
return True
# Class function
if isclass(obj):
# Static method
if isinstance(getattr_static(obj, fname), staticmethod):
return True
# Class function should be bound to an instance
if not hasattr(fn, '__self__'):
raise TypeError(dedent(
f"""
Given '{fname}' must be instance method, not function.
                            Try to make an instance first as follows.
>> var = {prefix}(*args, **kwargs)
>> kiwoom.connect(.., var.{fname}, ..)
"""
)) # False
# Just a callable object (a class that has '__call__')
elif callable(fn):
# Static call
if isinstance(getattr_static(fn, '__call__'), staticmethod):
return True
# Non-static call should be bound to an instance
if not hasattr(fn, '__self__'):
raise TypeError(dedent(
f"""
                    Given '{getattr(fn, '__qualname__')}' must be bound to an instance.
                    Try to make an instance first as follows.
                      >> var = {type(fn).__name__}(*args, **kwargs)
>> kiwoom.connect(.., var, ..)
"""
)) # False
# Not a valid argument
else:
from kiwoom.core.kiwoom import Kiwoom # Dynamic import to avoid circular import
raise TypeError(
f"Unsupported type, {type(fn)}. Please try with valid args.\n\n{Kiwoom.connect.__doc__}."
) # False
|
74792
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, BigInteger, String
import config
Base = declarative_base()
formFields = config.form_fields
class Register(Base):
__tablename__ = config.table_name
col1 = Column(formFields[0]['fname'], formFields[0]['ftype'], primary_key=True)
col2 = Column(formFields[1]['fname'], formFields[1]['ftype'], nullable=formFields[1]['null'])
col3 = Column(formFields[2]['fname'], formFields[2]['ftype'], nullable=formFields[2]['null'])
def __repr__(self):
return "Register(name ={}, email = {}, number = {})"\
.format(self.col1, self.col2, self.col3)
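# For reference, the Register model above expects config.form_fields to be a list of
# dicts with 'fname', 'ftype' and 'null' keys (inferred from the column definitions;
# the actual values live in config.py). An illustrative, assumed shape:
#   form_fields = [
#       {'fname': 'name', 'ftype': String(64), 'null': False},
#       {'fname': 'email', 'ftype': String(128), 'null': False},
#       {'fname': 'number', 'ftype': BigInteger, 'null': True},
#   ]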
|
74809
|
import logging
import sys
def get_logger(filename,
logger_name='centroFlye',
level=logging.INFO,
filemode='a',
stdout=True):
logger = logging.getLogger(logger_name)
logger.setLevel(level)
# create the logging file handler
fh = logging.FileHandler(filename, mode=filemode)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add handler to logger object
logger.addHandler(fh)
if stdout:
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
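# Minimal usage sketch (the log file name is arbitrary): messages go to the file and,
# because stdout=True by default, to the console as well.
#   logger = get_logger('centroFlye.log')
#   logger.info('pipeline started')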
|
74821
|
import os, sys
sys.path.insert(0, os.path.join(os.pardir, 'src'))
def sympy_solution():
from sympy import symbols, Rational, solve
C1, C3, C4 = symbols('C1 C3 C4')
s = solve([C1 - 1 - C3,
C1 - Rational(1,2) - C3 - C4,
2 + 2*C3 + C4], [C1,C3,C4])
return s
import numpy as np
import matplotlib.pyplot as plt
def plot_exact_solution():
x = np.linspace(0, 2, 101)
u = exact_solution(x)
plt.plot(x, u)
plt.xlabel('$x$'); plt.ylabel('$u$')
ax = plt.gca(); ax.set_aspect('equal')
plt.savefig('tmp.png'); plt.savefig('tmp.pdf')
def exact_solution(x):
if isinstance(x, np.ndarray):
return np.where(x < 1, -1./4*x, 0.5*x**2 - 5./4*x + 0.5)
else:
return -1./4*x if x < 1 else 0.5*x**2 - 5./4*x + 0.5
def sine_solution(x, N):
from numpy import pi, sin
s = 0
u = [] # u[i] is the solution for N=i
for i in range(N+1):
if i % 4 == 0:
cos_min_cos = -1
elif (i-1) % 4 == 0:
cos_min_cos = 2
elif (i-2) % 4 == 0:
cos_min_cos = -1
        elif (i-3) % 4 == 0:
cos_min_cos = 0
b_i = 2/(pi*(i+1))*cos_min_cos
A_ii = (i+1)**2*pi**2/4
c_i = b_i/A_ii
s += c_i*sin((i+1)*x*pi/2)
u.append(s.copy())
return u
def plot_sine_solution():
x = np.linspace(0, 2, 101)
u = sine_solution(x, N=20)
plt.figure()
x = np.linspace(0, 2, 101)
plt.plot(x, exact_solution(x), '--')
    N_values = 0, 1, 5, 10
    for N in N_values:
plt.plot(x, u[N])
plt.legend(['exact'] + ['N=%d' % N for N in N_values])
plt.savefig('tmp2.png'); plt.savefig('tmp2.pdf')
def P1_solution():
plt.figure()
from fe1D import mesh_uniform, u_glob
N_e_values = [2, 4, 8]
d = 1
legends = []
for N_e in N_e_values:
vertices, cells, dof_map = mesh_uniform(
N_e=N_e, d=d, Omega=[0,2], symbolic=False)
h = vertices[1] - vertices[0]
Ae = 1./h*np.array(
[[1, -1],
[-1, 1]])
N = N_e + 1
A = np.zeros((N, N))
b = np.zeros(N)
for e in range(N_e):
if vertices[e] >= 1:
be = -h/2.*np.array(
[1, 1])
else:
be = h/2.*np.array(
[0, 0])
for r in range(d+1):
for s in range(d+1):
A[dof_map[e][r], dof_map[e][s]] += Ae[r,s]
b[dof_map[e][r]] += be[r]
# Enforce boundary conditions
A[0,:] = 0; A[0,0] = 1; b[0] = 0
A[-1,:] = 0; A[-1,-1] = 1; b[-1] = 0
c = np.linalg.solve(A, b)
# Plot solution
print('c:', c)
print('vertices:', vertices)
print('cells:', cells)
print('len(cells):', len(cells))
print('dof_map:', dof_map)
xc, u, nodes = u_glob(c, vertices, cells, dof_map)
plt.plot(xc, u)
legends.append('$N_e=%d$' % N_e)
plt.plot(xc, exact_solution(xc), '--')
legends.append('exact')
plt.legend(legends, loc='lower left')
plt.savefig('tmp3.png'); plt.savefig('tmp3.pdf')
if __name__ == '__main__':
print(sympy_solution())
plot_sine_solution()
P1_solution()
plt.show()
|
74847
|
import pytest
from requests.exceptions import HTTPError
from commercetools.platform import models
def test_channel_get_by_id(old_client):
channel = old_client.channels.create(
models.ChannelDraft(
key="test-channel", roles=[models.ChannelRoleEnum.INVENTORY_SUPPLY]
)
)
assert channel.id
assert channel.key == "test-channel"
channel = old_client.channels.get_by_id(channel.id)
assert channel.id
assert channel.key == "test-channel"
with pytest.raises(HTTPError):
old_client.channels.get_by_id("invalid")
def test_channel_query(old_client):
old_client.channels.create(
models.ChannelDraft(
key="test-channel1", roles=[models.ChannelRoleEnum.INVENTORY_SUPPLY]
)
)
old_client.channels.create(
models.ChannelDraft(
key="test-channel2", roles=[models.ChannelRoleEnum.INVENTORY_SUPPLY]
)
)
# single sort query
result = old_client.channels.query(sort="id asc")
assert len(result.results) == 2
assert result.total == 2
# multiple sort queries
result = old_client.channels.query(sort=["id asc", "name asc"])
assert len(result.results) == 2
assert result.total == 2
def test_channel_update(old_client):
"""Test the return value of the update methods.
It doesn't test the actual update itself.
TODO: See if this is worth testing since we're using a mocking backend
"""
channel = old_client.channels.create(
models.ChannelDraft(
key="test-channel",
name=models.LocalizedString(nl="nl-channel"),
roles=[models.ChannelRoleEnum.INVENTORY_SUPPLY],
)
)
assert channel.key == "test-channel"
channel = old_client.channels.update_by_id(
id=channel.id,
version=channel.version,
actions=[
models.ChannelChangeNameAction(
name=models.LocalizedString(nl="nl-channel2")
)
],
)
|
74865
|
import numpy as np
import torch
from scipy.stats import entropy as sc_entropy
class MultipredictionEntropy:
    def __init__(self):
"""
Computes the entropy on multiple predictions of the same batch.
"""
super(MultipredictionEntropy, self).__init__()
def __call__(self, y, device='cpu'):
entr = []
for y in torch.argmax(y, dim=-1).transpose(dim0=0, dim1=1):
entr += [sc_entropy((np.unique(y, return_counts=True)[1] / y.shape[-1]), base=2)]
return torch.tensor(entr)
if __name__ == '__main__':
y = torch.tensor(
[
[ # pred 1
[.7, .3, .1],
[.7, .3, .1],
[.7, .3, .2]
],
[ # pred 2
[.4, .6, .3],
[.4, .6, .4],
[.6, .4, .3]
],
[ # pred 3
[.4, .6, .2],
[.6, .4, .8],
[.6, .4, .7]
],
[ # pred 4
[.1, .9, .3],
[.1, .9, .3],
[.1, .9, .3]
]
]
)
entropy_estimation = MultipredictionEntropy()
print(entropy_estimation(y))
|
74888
|
from django.contrib import admin
# Register your models here.
from training.models import Training
admin.site.register(Training)
|
74906
|
import unittest
from os.path import join, dirname
from io import BytesIO
from urllib.parse import quote
from falcon_heavy.http.multipart_parser import MultiPartParser, MultiPartParserError
from falcon_heavy.http.exceptions import RequestDataTooBig, TooManyFieldsSent
from falcon_heavy.http.utils import parse_options_header
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
def get_contents(filename):
with open(filename, 'rb') as f:
return f.read()
class TestMultipartParser(unittest.TestCase):
def test_limiting(self):
payload = (
b'--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\n'
b'Hello World\r\n'
b'--foo\r\nContent-Disposition: form-field; name=bar\r\n\r\n'
b'bar=baz\r\n--foo--'
)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload),
data_upload_max_memory_size=4
)
with self.assertRaises(RequestDataTooBig):
parser.parse()
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload),
data_upload_max_memory_size=400
)
form, _ = parser.parse()
self.assertEqual(u'Hello World', form['foo'].value)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload),
data_upload_max_number_fields=1
)
with self.assertRaises(TooManyFieldsSent):
parser.parse()
payload = (
b'--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\n'
b'Hello World\r\n'
b'--foo\r\nContent-Disposition: form-field; name=bar; filename=Grateful Dead\r\n\r\n'
b'aoxomoxoa\r\n--foo--'
)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload),
data_upload_max_memory_size=4
)
with self.assertRaises(RequestDataTooBig):
parser.parse()
def test_missing_multipart_boundary(self):
with self.assertRaises(MultiPartParserError) as ctx:
MultiPartParser(
stream=BytesIO(b''),
content_type='multipart/form-data',
content_length=0
)
self.assertIn("Invalid boundary in multipart", str(ctx.exception))
def test_invalid_multipart_content(self):
payload = b'bar'
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary="foo"',
content_length=len(payload)
)
with self.assertRaises(MultiPartParserError) as ctx:
parser.parse()
self.assertEqual("Expected boundary at start of multipart data", str(ctx.exception))
def test_empty_content(self):
parser = MultiPartParser(
stream=BytesIO(b''),
content_type='multipart/form-data; boundary=foo',
content_length=0
)
form, files = parser.parse()
self.assertEqual(0, len(form))
self.assertEqual(0, len(files))
def test_invalid_content_length(self):
with self.assertRaises(MultiPartParserError) as ctx:
MultiPartParser(
stream=BytesIO(b''),
content_type='multipart/form-data; boundary=foo',
content_length=-1
)
self.assertIn("Invalid content length", str(ctx.exception))
def test_basic(self):
resources = join(dirname(__file__), 'fixtures')
repository = (
('firefox3-2png1txt', '---------------------------186454651713519341951581030105', (
(u'anchor.png', 'file1', 'image/png', 'file1.png'),
(u'application_edit.png', 'file2', 'image/png', 'file2.png')
), u'example text'),
('firefox3-2pnglongtext', '---------------------------14904044739787191031754711748', (
(u'accept.png', 'file1', 'image/png', 'file1.png'),
(u'add.png', 'file2', 'image/png', 'file2.png')
), u'--long text\r\n--with boundary\r\n--lookalikes--'),
('opera8-2png1txt', '----------zEO9jQKmLc2Cq88c23Dx19', (
(u'arrow_branch.png', 'file1', 'image/png', 'file1.png'),
(u'award_star_bronze_1.png', 'file2', 'image/png', 'file2.png')
), u'blafasel öäü'),
('webkit3-2png1txt', '----WebKitFormBoundaryjdSFhcARk8fyGNy6', (
(u'gtk-apply.png', 'file1', 'image/png', 'file1.png'),
(u'gtk-no.png', 'file2', 'image/png', 'file2.png')
), u'this is another text with ümläüts'),
('ie6-2png1txt', '---------------------------7d91b03a20128', (
(u'file1.png', 'file1', 'image/x-png', 'file1.png'),
(u'file2.png', 'file2', 'image/x-png', 'file2.png')
), u'ie6 sucks :-/')
)
for name, boundary, files, text in repository:
folder = join(resources, name)
payload = get_contents(join(folder, 'request.txt'))
for filename, field, content_type, fsname in files:
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary="%s"' % boundary,
content_length=len(payload)
)
form, files = parser.parse()
if filename:
self.assertEqual(filename, files[field].filename)
self.assertEqual(content_type, files[field].content_type)
self.assertEqual(get_contents(join(folder, fsname)), files[field].stream.read())
else:
self.assertEqual(filename, form[field].filename)
self.assertEqual(content_type, form[field].content_type)
self.assertEqual(get_contents(join(folder, fsname)), form[field])
def test_ie7_unc_path(self):
payload_file = join(dirname(__file__), 'fixtures', 'ie7_full_path_request.txt')
payload = get_contents(payload_file)
boundary = '---------------------------7da36d1b4a0164'
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary="%s"' % boundary,
content_length=len(payload)
)
form, files = parser.parse()
self.assertEqual(u'Sellersburg Town Council Meeting 02-22-2010doc.doc',
files['cb_file_upload_multiple'].filename)
def test_end_of_file(self):
payload = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
b'Content-Type: text/plain\r\n\r\n'
b'file contents and no end'
)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload)
)
with self.assertRaises(MultiPartParserError) as ctx:
parser.parse()
self.assertEqual(u'Unexpected end of part', str(ctx.exception))
def test_broken_base64(self):
payload = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
b'Content-Transfer-Encoding: base64\r\n'
b'Content-Type: text/plain\r\n\r\n'
b'error'
b'--foo--'
)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload)
)
with self.assertRaises(MultiPartParserError) as ctx:
parser.parse()
self.assertIn(u'Could not decode base64 data', str(ctx.exception))
def test_file_no_content_type(self):
payload = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n\r\n'
b'file contents\r\n--foo--'
)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload)
)
_, files = parser.parse()
self.assertEqual(u'test.txt', files['test'].filename)
self.assertEqual(b'file contents', files['test'].stream.read())
def test_extra_newline(self):
payload = (
b'\r\n\r\n--foo\r\n'
b'Content-Disposition: form-data; name="foo"\r\n\r\n'
b'a string\r\n'
b'--foo--'
)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload)
)
form, _ = parser.parse()
self.assertEqual(u'a string', form['foo'].value)
def test_headers(self):
payload = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
b'X-Custom-Header: blah\r\n'
b'Content-Type: text/plain; charset=utf-8\r\n\r\n'
b'file contents, just the contents\r\n'
b'--foo--'
)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload)
)
_, files = parser.parse()
self.assertEqual('text/plain; charset=utf-8', files['foo'].content_type)
self.assertEqual(files['foo'].content_type, files['foo'].headers['content-type'])
self.assertEqual('blah', files['foo'].headers['x-custom-header'])
payload = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="foo"\r\n'
b'X-Custom-Header: blah\r\n'
b'Content-Type: application/json; charset=utf-8\r\n\r\n'
b'314\r\n'
b'--foo--'
)
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=foo',
content_length=len(payload)
)
form, _ = parser.parse()
self.assertEqual('314', form['foo'].value)
self.assertEqual('application/json; charset=utf-8', form['foo'].content_type)
self.assertEqual(form['foo'].content_type, form['foo'].headers['content-type'])
self.assertEqual('blah', form['foo'].headers['x-custom-header'])
def test_empty_multipart(self):
payload = b'--boundary--'
parser = MultiPartParser(
stream=BytesIO(payload),
content_type='multipart/form-data; boundary=boundary',
content_length=len(payload)
)
form, files = parser.parse()
self.assertEqual(0, len(form))
self.assertEqual(0, len(files))
def test_unicode_file_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = (
'--foo\r\n'
'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'{}\r\n'
'Content-Type: application/octet-stream\r\n\r\n'
'You got pwnd.\r\n'
'\r\n--foo--\r\n'
).format(quote(UNICODE_FILENAME))
parser = MultiPartParser(
stream=BytesIO(payload.encode()),
content_type='multipart/form-data; boundary="foo"',
content_length=len(payload)
)
_, files = parser.parse()
self.assertEqual(UNICODE_FILENAME, files['file_unicode'].filename)
def test_rfc2231_unicode_name(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = (
'--foo\r\n'
'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'{}\r\n'
'Content-Type: application/octet-stream\r\n\r\n'
'You got pwnd.\r\n'
'\r\n--foo--\r\n'
).format(quote(UNICODE_FILENAME))
parser = MultiPartParser(
stream=BytesIO(payload.encode()),
content_type='multipart/form-data; boundary="foo"',
content_length=len(payload)
)
_, files = parser.parse()
self.assertEqual(UNICODE_FILENAME, files['file_unicode'].filename)
def test_blank_filenames(self):
"""
Receiving file upload when filename is blank (before and after
sanitization) should be okay.
"""
# The second value is normalized to an empty name by
# MultiPartParser.IE_sanitize()
filenames = ['', 'C:\\Windows\\']
payload = ''
for i, name in enumerate(filenames):
payload += (
'--foo\r\n'
'Content-Disposition: form-data; name="file{}"; filename="{}"\r\n'
'Content-Type: application/octet-stream\r\n\r\n'
'You got pwnd.\r\n'
).format(i, name)
payload += '\r\n--foo--\r\n'
parser = MultiPartParser(
stream=BytesIO(payload.encode()),
content_type='multipart/form-data; boundary="foo"',
content_length=len(payload)
)
_, files = parser.parse()
self.assertEqual(0, len(files))
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
# This test simulates possible directory traversal attacks by a
        # malicious uploader. We have to do some monkeybusiness here to construct
        # a malicious payload with an invalid file name (containing os.sep or
        # os.pardir). This is similar to what an attacker would need to do when
# trying such an attack.
scary_file_names = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-style.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt" # Relative path, mixed.
]
payload = ''
for i, name in enumerate(scary_file_names):
payload += (
'--foo\r\n'
'Content-Disposition: form-data; name="file{}"; filename="{}"\r\n'
'Content-Type: application/octet-stream\r\n\r\n'
'You got pwnd.\r\n'
).format(i, name)
payload += '\r\n--foo--\r\n'
parser = MultiPartParser(
stream=BytesIO(payload.encode()),
content_type='multipart/form-data; boundary="foo"',
content_length=len(payload)
)
_, files = parser.parse()
# The filenames should have been sanitized by the time it got to the view.
for i, name in enumerate(scary_file_names):
got = files['file%s' % i]
self.assertEqual('hax0rd.txt', got.filename)
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
long_str = 'f' * 300
cases = [
# field name, filename, expected
('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]),
('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]),
('no_extension', long_str, long_str[:255]),
('no_filename', '.%s' % long_str, '.%s' % long_str[:254]),
('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]),
]
payload = ''
for name, filename, _ in cases:
payload += (
'--foo\r\n'
'Content-Disposition: form-data; name="{}"; filename="{}"\r\n'
'Content-Type: application/octet-stream\r\n\r\n'
'Oops.\r\n'
).format(name, filename)
payload += '\r\n--foo--\r\n'
parser = MultiPartParser(
stream=BytesIO(payload.encode()),
content_type='multipart/form-data; boundary="foo"',
content_length=len(payload)
)
_, files = parser.parse()
for name, _, expected in cases:
got = files[name]
self.assertEqual(expected, got.filename, 'Mismatch for {}'.format(name))
self.assertLess(len(got.filename), 256,
"Got a long file name (%s characters)." % len(got.filename))
def test_rfc2231_parsing(self):
test_data = (
(b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
u"This is ***fun***"),
(b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
u"foo-ä.html"),
(b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
u"foo-ä.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_options_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
def test_rfc2231_wrong_title(self):
"""
Test wrongly formatted RFC 2231 headers (missing double single quotes).
Parsing should not crash (#24209).
"""
test_data = (
(b"Content-Type: application/x-stuff; title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
"'This%20is%20%2A%2A%2Afun%2A%2A%2A"),
(b"Content-Type: application/x-stuff; title*='foo.html",
"'foo.html"),
(b"Content-Type: application/x-stuff; title*=bar.html",
"bar.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_options_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
if __name__ == '__main__':
unittest.main()
|
74936
|
from __future__ import division
import datetime
import time
from PyQt5.QtCore import Qt
from PyQt5 import QtWidgets
from chainer.training import extension
try:
from chainer.training.triggers import interval
except ImportError:
from chainer.training.triggers import interval_trigger as interval
class CWProgressBar(extension.Extension, QtWidgets.QDialog):
def __init__(self, epoch, update_interval=100, *args):
self._update_interval = update_interval
self._recent_timing = []
self.stop_trigger = None
self.interval_trigger = interval.IntervalTrigger(epoch, 'epoch')
self.epoch = epoch
super(CWProgressBar, self).__init__(*args)
self.setWindowTitle('progress')
main_layout = QtWidgets.QVBoxLayout()
self.pbar = QtWidgets.QProgressBar()
self.pbar.setGeometry(25, 40, 200, 25)
main_layout.addWidget(self.pbar)
self._stat_label = QtWidgets.QLabel('')
main_layout.addWidget(self._stat_label)
self._est_label = QtWidgets.QLabel('')
main_layout.addWidget(self._est_label)
stop_button = QtWidgets.QPushButton('Stop')
stop_button.clicked.connect(self.finalize)
main_layout.addWidget(stop_button)
self.setLayout(main_layout)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
        # The selector must match this widget's class name for the background rule to apply
        self.setStyleSheet('''CWProgressBar {
background: rgb(75,75,75);
}
QPushButton {
background-color: rgb(205,85,85);
color: white;
}
QLabel {
color: black;
}
''')
self.show()
self.raise_()
def __call__(self, trainer):
        # compute the training length and the status-line template
training_length = self.epoch, 'epoch'
stat_template = (
'{0.iteration:10} iter, {0.epoch} epoch / %s %ss\n' %
training_length)
length, unit = training_length
iteration = trainer.updater.iteration
self.pbar.setRange(0, length)
# print the progress bar
if iteration % self._update_interval == 0:
epoch = trainer.updater.epoch_detail
recent_timing = self._recent_timing
now = time.time()
recent_timing.append((iteration, epoch, now))
            self.pbar.setValue(int(epoch))  # QProgressBar expects an int
self._stat_label.setText(stat_template.format(trainer.updater))
# out.write(status)
old_t, old_e, old_sec = recent_timing[0]
span = now - old_sec
if span != 0:
speed_t = (iteration - old_t) / span
speed_e = (epoch - old_e) / span
else:
speed_t = float('inf')
speed_e = float('inf')
if unit == 'iteration':
estimated_time = (length - iteration) / speed_t
else:
estimated_time = (length - epoch) / speed_e
self._est_label.setText('{:10.5g} iters/sec. '
'Estimated time to finish: {}.\n'
.format(speed_t,
datetime.timedelta(seconds=estimated_time)))
if len(recent_timing) > 100:
del recent_timing[0]
QtWidgets.QApplication.instance().processEvents()
def finalize(self):
# delete the progress bar and exit training
self.stop_trigger = True
super(CWProgressBar, self).close()
def get_stop_trigger(self, trainer):
return self.stop_trigger or self.interval_trigger(trainer)
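# Rough wiring sketch (assumes an already configured chainer `updater`; not part of
# the original module):
#   from chainer import training
#   pbar = CWProgressBar(epoch=20)
#   trainer = training.Trainer(updater, stop_trigger=pbar.get_stop_trigger)
#   trainer.extend(pbar)
#   trainer.run()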
|
74938
|
from django.conf.urls import url
from .views import (
post_model_create_view,
post_model_detail_view,
post_model_delete_view,
post_model_list_view,
post_model_update_view
)
urlpatterns = [
url(r'^$', post_model_list_view, name='list'),
url(r'^create/$', post_model_create_view, name='create'),
url(r'^(?P<id>\d+)/$', post_model_detail_view, name='detail'),
url(r'^(?P<id>\d+)/delete/$', post_model_delete_view, name='delete'),
url(r'^(?P<id>\d+)/edit/$', post_model_update_view, name='update'),
#url(r'^admin/', admin.site.urls),
#url(r'^$', home, name='home'),
#url(r'^redirect/$', redirect_somewhere, name='home')
]
|
74944
|
from django.core.cache import cache
from django.shortcuts import render, get_object_or_404
from django_redis import get_redis_connection
from .models import Image
# Create your views here.
con = get_redis_connection("default")
def index(request):
query = Image.objects.all()
images_seq = [
{'id': data.id,
'Url': data.url,
'CreateDate': data.create_date}
for data in query
]
cache.get_or_set('click', 0, timeout=None)
total_views = cache.incr('click')
rank = con.zrevrange(name='images', start=0, end=9, withscores=True, score_cast_func=int)
rank_seq = [
{"url": str(r[0], 'utf-8'),
"value": r[1]}
for r in rank
]
return render(request, 'images/index.html', {
'images': images_seq,
'total_views': total_views,
'ranks': rank_seq,
})
def detail(request, image_id):
image = get_object_or_404(Image, id=image_id)
    # keyword arguments keep this call compatible with both redis-py 2.x and 3.x signatures
    total_views = con.zincrby(name='images', amount=1, value=image.url)
return render(request,
'images/detail.html', {
'image': image,
'total_views': int(total_views)
})
|
74951
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScCuritibanosSpider(FecamGazetteSpider):
name = "sc_curitibanos"
FECAM_QUERY = "cod_entidade:82"
TERRITORY_ID = "4204806"
|
74972
|
class Test:
"""
Lorem ipsum dolor sit amet, consectetur adipiscing elit,
deserunt mollit anim id est laborum.
.. sourcecode:: pycon
>>> # extract 100 LDA topics, using default parameters
>>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)
using distributed version with 4 workers
running online LDA training, 100 topics, 1 passes over the supplied corpus of 3199665 documets,
updating model once every 40000 documents
..
Some another text
"""
some_field = 1
|
74982
|
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as Data
import numpy as np
import time
import sys
import utils
print('Generating synthetic data')
n_train, n_test, num_inputs = 20, 100, 200
true_w, true_b = torch.ones(num_inputs, 1) * 0.01, 0.05
features = torch.randn((n_train + n_test, num_inputs))
labels = torch.matmul(features, true_w) + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
train_features, test_features = features[:n_train, :], features[n_train:, :]
train_labels, test_labels = labels[:n_train], labels[n_train:]
print('Initializing model parameters')
def init_params():
w = torch.randn((num_inputs, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
return [w, b]
print('Defining the L2 penalty term')
def l2_penalty(w):
return (w**2).sum() / 2
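# With this penalty, the objective minimized in fit_and_plot below is the squared
# loss plus lambd * ||w||^2 / 2, i.e. standard L2 weight decay on w (b is not penalized).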
print('Defining training and testing')
batch_size, num_epochs, lr = 1, 100, 0.003
net, loss = utils.linreg, utils.squared_loss
dataset = torch.utils.data.TensorDataset(train_features, train_labels)
train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
def fit_and_plot(lambd):
w, b = init_params()
train_ls, test_ls = [], []
for _ in range(num_epochs):
for X, y in train_iter:
l = loss(net(X, w, b), y) + lambd * l2_penalty(w)
l = l.sum()
if w.grad is not None:
w.grad.data.zero_()
b.grad.data.zero_()
l.backward()
utils.sgd([w, b], lr, batch_size)
train_ls.append(loss(net(train_features, w, b), train_labels).mean().item())
test_ls.append(loss(net(test_features, w, b), test_labels).mean().item())
utils.semilogy(range(1, num_epochs+1), train_ls, 'epochs', 'loss',
range(1, num_epochs+1), test_ls, ['train', 'test'])
print('L2 norm of w:', w.norm().item())
print('Observing overfitting')
fit_and_plot(lambd=0)
print('Using weight decay')
fit_and_plot(lambd=4)
|
74998
|
import numpy as np
import unittest
from convolution import conv2d, add_padding
class TestConvolution(unittest.TestCase):
def test_paddings_shape(self, N: int = 1000):
for _ in range(N):
m_h = np.random.randint(3, 100)
m_w = np.random.randint(3, 100)
random_matrix = np.random.rand(m_h, m_w)
rows, cols = np.random.randint(0, 100, 2)
random_matrix_with_padding = add_padding(random_matrix, (rows, cols))
self.assertEqual(random_matrix_with_padding.shape, (m_h + rows*2, m_w + cols*2))
def test_random_case(self, N: int = 1000):
for _ in range(N):
d = np.random.randint(1, 100, 2)
k = np.random.choice([1, 3, 5, 7, 9, 10], 2) # `10` is to check oddness assertion
random_matrix = np.random.rand(*d)
random_kernel = np.random.rand(*k)
for __ in range(N):
stride = np.random.randint(0, 5, 2) # `0` is to check parameters assertion
dilation = np.random.randint(0, 5, 2) # `0` is to check parameters assertion
padding = np.random.randint(-1, 5, 2) # `-1` is to check parameters assertion
try: # `try` in case of division by zero when stride[0] or stride[1] equal to zero
h_out = np.floor((d[0] + 2 * padding[0] - k[0] - (k[0] - 1) * (dilation[0] - 1)) / stride[0]).astype(int) + 1
w_out = np.floor((d[1] + 2 * padding[1] - k[1] - (k[1] - 1) * (dilation[1] - 1)) / stride[1]).astype(int) + 1
except:
h_out, w_out = None, None
# print(f'Matr: {d} | Kern: {k} | Stri: {stride} | Dila: {dilation} | Padd: {padding} | OutD: {h_out, w_out}') # for debugging
if (stride[0] < 1 or stride[1] < 1 or dilation[0] < 1 or dilation[1] < 1 or padding[0] < 0 or padding[1] < 0 or
not isinstance(stride[0], int) or not isinstance(stride[1], int) or not isinstance(dilation[0], int) or
not isinstance(dilation[1], int) or not isinstance(padding[0], int) or not isinstance(padding[1], int)):
with self.assertRaises(AssertionError):
matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
elif k[0] % 2 != 1 or k[1] % 2 != 1:
with self.assertRaises(AssertionError):
matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
elif d[0] < k[0] or d[1] < k[1]:
with self.assertRaises(AssertionError):
matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
elif h_out <= 0 or w_out <= 0:
with self.assertRaises(AssertionError):
matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
else:
matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
self.assertEqual(matrix_conved.shape, (h_out, w_out))
def test_kernel_3x3_easy(self):
matrix = np.array([[0, 4, 3, 2, 0, 1, 0],
[4, 3, 0, 1, 0, 1, 0],
[1, 3, 4, 2, 0, 1, 0],
[3, 4, 2, 2, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0]])
kernel = np.array([[1, 1, 3],
[0, 2, 3],
[3, 3, 3]])
# stride = 1, dilation = 1, padding = 0
result_110 = conv2d(matrix, kernel)
answer_110 = np.array([[43, 43, 25, 17, 6],
[52, 44, 17, 16, 6],
[30, 23, 10, 11, 6]])
# stride = 1, dilation = 1, padding = 1
result_111 = conv2d(matrix, kernel, padding=(1, 1))
answer_111 = np.array([[33, 38, 24, 7, 9, 5, 3],
[41, 43, 43, 25, 17, 6, 4],
[45, 52, 44, 17, 16, 6, 4],
[28, 30, 23, 10, 11, 6, 4],
[15, 13, 12, 4, 8, 3, 1]])
# stride = 1, dilation = 2, padding = 0
result_120 = conv2d(matrix, kernel, dilation=(2, 2))
answer_120 = np.array([[11, 19, 3]])
# stride = 1, dilation = 2, padding = 1
result_121 = conv2d(matrix, kernel, dilation=(2, 2), padding=(1, 1))
answer_121 = np.array([[27, 15, 26, 6, 11],
[22, 11, 19, 3, 8],
[20, 8, 14, 0, 4]])
# stride = 2, dilation = 1, padding = 0
result_210 = conv2d(matrix, kernel, stride=(2, 2))
answer_210 = np.array([[43, 25, 6],
[30, 10, 6]])
# stride = 2, dilation = 1, padding = 1
result_211 = conv2d(matrix, kernel, stride=(2, 2), padding=(1, 1))
answer_211 = np.array([[33, 24, 9, 3],
[45, 44, 16, 4],
[15, 12, 8, 1]])
# stride = 2, dilation = 2, padding = 0
result_220 = conv2d(matrix, kernel, stride=(2, 2), dilation=(2, 2))
answer_220 = np.array([[11, 3]])
# stride = 2, dilation = 2, padding = 1
result_221 = conv2d(matrix, kernel, stride=(2, 2), dilation=(2, 2), padding=(1, 1))
answer_221 = np.array([[27, 26, 11],
[20, 14, 4]])
self.assertEqual(result_110.tolist(), answer_110.tolist())
self.assertEqual(result_111.tolist(), answer_111.tolist())
self.assertEqual(result_120.tolist(), answer_120.tolist())
self.assertEqual(result_121.tolist(), answer_121.tolist())
self.assertEqual(result_210.tolist(), answer_210.tolist())
self.assertEqual(result_211.tolist(), answer_211.tolist())
self.assertEqual(result_220.tolist(), answer_220.tolist())
self.assertEqual(result_221.tolist(), answer_221.tolist())
def test_kernel_5x5_difficult(self):
matrix = np.array([[1, 4, 4, 2, 1, 0, 0, 1, 0, 0, 3, 3, 3, 4],
[0, 2, 0, 2, 0, 3, 4, 4, 2, 1, 1, 3, 0, 4],
[1, 1, 0, 0, 3, 4, 2, 4, 4, 2, 3, 0, 0, 4],
[4, 0, 1, 2, 0, 2, 0, 3, 3, 3, 0, 4, 1, 0],
[3, 0, 0, 3, 3, 3, 2, 0, 2, 1, 1, 0, 4, 2],
[2, 4, 3, 1, 1, 0, 2, 1, 3, 4, 4, 0, 2, 3],
[2, 4, 3, 3, 2, 1, 4, 0, 3, 4, 1, 2, 0, 0],
[2, 1, 0, 1, 1, 2, 2, 3, 0, 0, 1, 2, 4, 2],
[3, 3, 1, 1, 1, 1, 4, 4, 2, 3, 2, 2, 2, 3]])
kernel = np.array([[2, 0, 2, 2, 2],
[2, 3, 1, 1, 3],
[3, 1, 1, 3, 1],
[2, 2, 3, 1, 1],
[0, 0, 1, 0, 0]])
# default params
result_11_11_00 = conv2d(matrix, kernel)
answer_11_11_00 = np.array([[44., 58., 59., 62., 70., 80., 75., 92., 64., 72.],
[52., 52., 59., 87., 92., 83., 77., 74., 71., 67.],
[66., 63., 60., 64., 76., 79., 75., 82., 77., 64.],
[75., 69., 64., 64., 69., 75., 70., 71., 75., 74.],
[74., 71., 63., 66., 61., 75., 79., 47., 73., 76.]])
# only stride: (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (4, 6)
result_12_11_00 = conv2d(matrix, kernel, stride=(1, 2))
answer_12_11_00 = np.array([[44., 59., 70., 75., 64.],
[52., 59., 92., 77., 71.],
[66., 60., 76., 75., 77.],
[75., 64., 69., 70., 75.],
[74., 63., 61., 79., 73.]])
result_13_11_00 = conv2d(matrix, kernel, stride=(1, 3))
answer_13_11_00 = np.array([[44., 62., 75., 72.],
[52., 87., 77., 67.],
[66., 64., 75., 64.],
[75., 64., 70., 74.],
[74., 66., 79., 76.]])
result_21_11_00 = conv2d(matrix, kernel, stride=(2, 1))
answer_21_11_00 = np.array([[44., 58., 59., 62., 70., 80., 75., 92., 64., 72.],
[66., 63., 60., 64., 76., 79., 75., 82., 77., 64.],
[74., 71., 63., 66., 61., 75., 79., 47., 73., 76.]])
result_22_11_00 = conv2d(matrix, kernel, stride=(2, 2))
answer_22_11_00 = np.array([[44., 59., 70., 75., 64],
[66., 60., 76., 75., 77],
[74., 63., 61., 79., 73]])
result_23_11_00 = conv2d(matrix, kernel, stride=(2, 3))
answer_23_11_00 = np.array([[44., 62., 75., 72.],
[66., 64., 75., 64.],
[74., 66., 79., 76.]])
result_31_11_00 = conv2d(matrix, kernel, stride=(3, 1))
answer_31_11_00 = np.array([[44., 58., 59., 62., 70., 80., 75., 92., 64., 72.],
[75., 69., 64., 64., 69., 75., 70., 71., 75., 74.]])
result_32_11_00 = conv2d(matrix, kernel, stride=(3, 2))
answer_32_11_00 = np.array([[44., 59., 70., 75., 64.],
[75., 64., 69., 70., 75.]])
result_46_11_00 = conv2d(matrix, kernel, stride=(4, 6))
answer_46_11_00 = np.array([[44., 75.],
[74., 79.]])
# only dilation: (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)
result_11_12_00 = conv2d(matrix, kernel, dilation=(1, 2))
answer_11_12_00 = np.array([[46., 70., 50., 77., 65., 94.],
[67., 68., 67., 76., 53., 95.],
[80., 65., 60., 64., 70., 73.],
[74., 74., 77., 73., 79., 55.],
[81., 66., 74., 60., 70., 58.]])
result_11_13_00 = conv2d(matrix, kernel, dilation=(1, 3))
answer_11_13_00 = np.array([[48., 77.],
[65., 65.],
[73., 55.],
[97., 67.],
[84., 68.]])
result_11_21_00 = conv2d(matrix, kernel, dilation=(2, 1))
answer_11_21_00 = np.array([[78., 73., 64., 72., 81., 69., 73., 69., 68., 81.]])
result_11_22_00 = conv2d(matrix, kernel, dilation=(2, 2))
answer_11_22_00 = np.array([[67., 55., 80., 63., 77., 79.]])
result_11_23_00 = conv2d(matrix, kernel, dilation=(2, 3))
answer_11_23_00 = np.array([[65., 79.]])
# only paddings: (0, 1), (1, 0), (1, 1)
result_11_11_01 = conv2d(matrix, kernel, padding=(0, 1))
answer_11_11_01 = np.array([[41., 44., 58., 59., 62., 70., 80., 75., 92., 64., 72., 71.],
[34., 52., 52., 59., 87., 92., 83., 77., 74., 71., 67., 43.],
[51., 66., 63., 60., 64., 76., 79., 75., 82., 77., 64., 57.],
[63., 75., 69., 64., 64., 69., 75., 70., 71., 75., 74., 43.],
[51., 74., 71., 63., 66., 61., 75., 79., 47., 73., 76., 54.]])
result_11_11_10 = conv2d(matrix, kernel, padding=(1, 0))
answer_11_11_10 = np.array([[39., 45., 45., 61., 52., 58., 66., 63., 53., 56.],
[44., 58., 59., 62., 70., 80., 75., 92., 64., 72.],
[52., 52., 59., 87., 92., 83., 77., 74., 71., 67.],
[66., 63., 60., 64., 76., 79., 75., 82., 77., 64.],
[75., 69., 64., 64., 69., 75., 70., 71., 75., 74.],
[74., 71., 63., 66., 61., 75., 79., 47., 73., 76.],
[70., 59., 64., 55., 72., 83., 81., 77., 70., 69.]])
result_11_11_11 = conv2d(matrix, kernel, padding=(1, 1))
answer_11_11_11 = np.array([[26., 39., 45., 45., 61., 52., 58., 66., 63., 53., 56., 51.],
[41., 44., 58., 59., 62., 70., 80., 75., 92., 64., 72., 71.],
[34., 52., 52., 59., 87., 92., 83., 77., 74., 71., 67., 43.],
[51., 66., 63., 60., 64., 76., 79., 75., 82., 77., 64., 57.],
[63., 75., 69., 64., 64., 69., 75., 70., 71., 75., 74., 43.],
[51., 74., 71., 63., 66., 61., 75., 79., 47., 73., 76., 54.],
[59., 70., 59., 64., 55., 72., 83., 81., 77., 70., 69., 58.]])
# different sets of parameters
result_21_13_00 = conv2d(matrix, kernel, stride=(2, 1), dilation=(1, 3), padding=(0, 0))
answer_21_13_00 = np.array([[48., 77.],
[73., 55.],
[84., 68.]])
result_23_13_13 = conv2d(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(1, 3))
answer_23_13_13 = np.array([[28., 36., 31.],
[53., 65., 47.],
[62., 97., 70.],
[64., 79., 74.]])
result_32_23_22 = conv2d(matrix, kernel, stride=(3, 2), dilation=(2, 3), padding=(2, 2))
answer_32_23_22 = np.array([[54., 55., 34.],
[34., 69., 43.]])
# default params
self.assertEqual(result_11_11_00.tolist(), answer_11_11_00.tolist())
# only stride: (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (4, 6)
self.assertEqual(result_12_11_00.tolist(), answer_12_11_00.tolist())
self.assertEqual(result_13_11_00.tolist(), answer_13_11_00.tolist())
self.assertEqual(result_21_11_00.tolist(), answer_21_11_00.tolist())
self.assertEqual(result_22_11_00.tolist(), answer_22_11_00.tolist())
self.assertEqual(result_23_11_00.tolist(), answer_23_11_00.tolist())
self.assertEqual(result_31_11_00.tolist(), answer_31_11_00.tolist())
self.assertEqual(result_32_11_00.tolist(), answer_32_11_00.tolist())
self.assertEqual(result_46_11_00.tolist(), answer_46_11_00.tolist())
# only dilation: (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)
self.assertEqual(result_11_12_00.tolist(), answer_11_12_00.tolist())
self.assertEqual(result_11_13_00.tolist(), answer_11_13_00.tolist())
self.assertEqual(result_11_21_00.tolist(), answer_11_21_00.tolist())
self.assertEqual(result_11_22_00.tolist(), answer_11_22_00.tolist())
self.assertEqual(result_11_23_00.tolist(), answer_11_23_00.tolist())
# only paddings: (0, 1), (1, 0), (1, 1)
self.assertEqual(result_11_11_01.tolist(), answer_11_11_01.tolist())
self.assertEqual(result_11_11_10.tolist(), answer_11_11_10.tolist())
self.assertEqual(result_11_11_11.tolist(), answer_11_11_11.tolist())
# different sets of parameters
self.assertEqual(result_21_13_00.tolist(), answer_21_13_00.tolist())
self.assertEqual(result_23_13_13.tolist(), answer_23_13_13.tolist())
self.assertEqual(result_32_23_22.tolist(), answer_32_23_22.tolist())
def test_kernel_5x3_difficult(self):
matrix = np.array([[0, 4, 3, 2, 0, 1, 0],
[4, 3, 0, 1, 0, 1, 0],
[1, 3, 4, 2, 0, 1, 0],
[3, 4, 2, 2, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0]])
kernel = np.array([[1, 1, 3],
[0, 2, 3],
[3, 3, 3],
[0, 2, 1],
[3, 3, 0]])
# default params
result_11_11_00 = conv2d(matrix, kernel, stride=(1, 1), dilation=(1, 1), padding=(0, 0))
answer_11_11_00 = np.array([[53., 49., 29., 18., 11.]])
# different sets of parameters
result_21_13_00 = conv2d(matrix, kernel, stride=(2, 1), dilation=(1, 3), padding=(0, 0))
answer_21_13_00 = np.array([[17.]])
result_23_13_13 = conv2d(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(1, 3))
answer_23_13_13 = np.array([[34., 38., 9.],
[30., 24., 7.]])
result_32_23_42 = conv2d(matrix, kernel, stride=(3, 2), dilation=(2, 3), padding=(4, 2))
answer_32_23_42 = np.array([[18., 10., 17.],
[18., 17., 11.]])
result_21_12_04 = conv2d(matrix, kernel, stride=(2, 1), dilation=(1, 2), padding=(0, 4))
answer_21_12_04 = np.array([[18., 34., 40., 44., 22., 37., 15., 19., 0., 7., 0.]])
result_22_12_04 = conv2d(matrix, kernel, stride=(2, 2), dilation=(1, 2), padding=(0, 4))
answer_22_12_04 = np.array([[18., 40., 22., 15., 0., 0.]])
result_23_13_25 = conv2d(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(2, 5))
answer_23_13_25 = np.array([[15., 27., 21., 0.],
[34., 27., 13., 0.],
[21., 11., 3., 0.]])
result_11_11_33 = conv2d(matrix, kernel, stride=(1, 1), dilation=(1, 1), padding=(3, 3))
answer_11_11_33 = np.array([[ 0., 0., 16., 32., 17., 7., 4., 5., 3., 0., 0.],
[ 0., 4., 26., 39., 49., 35., 16., 8., 6., 0., 0.],
[ 0., 13., 47., 69., 52., 23., 16., 10., 6., 0., 0.],
[ 0., 18., 51., 53., 49., 29., 18., 11., 7., 0., 0.],
[ 0., 24., 45., 52., 44., 17., 17., 8., 4., 0., 0.],
[ 0., 12., 28., 30., 23., 10., 11., 6., 4., 0., 0.],
[ 0., 9., 15., 13., 12., 4., 8., 3., 1., 0., 0.]])
# default params
self.assertEqual(result_11_11_00.tolist(), answer_11_11_00.tolist())
# different sets of parameters
self.assertEqual(result_21_13_00.tolist(), answer_21_13_00.tolist())
self.assertEqual(result_23_13_13.tolist(), answer_23_13_13.tolist())
self.assertEqual(result_32_23_42.tolist(), answer_32_23_42.tolist())
self.assertEqual(result_21_12_04.tolist(), answer_21_12_04.tolist())
self.assertEqual(result_22_12_04.tolist(), answer_22_12_04.tolist())
self.assertEqual(result_23_13_25.tolist(), answer_23_13_25.tolist())
self.assertEqual(result_11_11_33.tolist(), answer_11_11_33.tolist())
if __name__ == '__main__':
unittest.main()
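# --- Illustrative sketch (not part of the original test module) ---
# The tests above exercise a conv2d(matrix, kernel, stride, dilation, padding)
# helper whose implementation lives elsewhere. The function below is a minimal
# NumPy sketch of such a helper, assuming the usual deep-learning convention of
# zero-padded cross-correlation; its name and parameters mirror the calls made in
# the tests, but it is not the implementation under test.
def conv2d_sketch(matrix, kernel, stride=(1, 1), dilation=(1, 1), padding=(0, 0)):
    sh, sw = stride
    dh, dw = dilation
    ph, pw = padding
    kh, kw = kernel.shape
    # zero-pad the input on both sides of each axis
    padded = np.pad(matrix, ((ph, ph), (pw, pw)), mode='constant')
    # effective kernel extent once dilation is taken into account
    eff_kh = dh * (kh - 1) + 1
    eff_kw = dw * (kw - 1) + 1
    out_h = (padded.shape[0] - eff_kh) // sh + 1
    out_w = (padded.shape[1] - eff_kw) // sw + 1
    out = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            # strided, dilated window multiplied elementwise with the kernel
            window = padded[i * sh:i * sh + eff_kh:dh, j * sw:j * sw + eff_kw:dw]
            out[i, j] = np.sum(window * kernel)
    return out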
|
75008
|
import os
from arcsv.helper import get_ucsc_name
def get_inverted_pair(pair, bam):
chrom = bam.getrname(pair[0].rname) # already checked reads on same chrom
if pair[0].pos < pair[1].pos:
left = pair[0]
right = pair[1]
else:
left = pair[1]
right = pair[0]
left_coord = (left.reference_start, left.reference_end)
right_coord = (right.reference_start, right.reference_end)
return chrom, left_coord, right_coord, left.is_reverse
def inverted_pair_to_bed12(ipair):
chrom = get_ucsc_name(ipair[0])
left_coord = ipair[1]
right_coord = ipair[2]
is_reverse = ipair[3]
strand = '-' if is_reverse else '+'
if left_coord[1] >= right_coord[0]:
# ucsc doesn't support overlapping blocks
template = ('{chr}\t{start}\t{end}\t{str}/{str}\t0'
'\t{str}\t{start}\t{end}\t0\t1\t{len}\t0\n')
line1 = template.format(chr=chrom, start=left_coord[0], end=left_coord[1] + 1,
str=strand, len=left_coord[1] - left_coord[0] + 1)
line2 = template.format(chr=chrom, start=right_coord[0], end=right_coord[1] + 1,
str=strand, len=right_coord[1] - right_coord[0] + 1)
return line1 + line2
else:
block1_len = left_coord[1] - left_coord[0] + 1
block2_len = right_coord[1] - right_coord[0] + 1
block1_start = 0
block2_start = right_coord[0] - left_coord[0]
template = ('{chr}\t{start}\t{end}\t{str}/{str}\t0\t{str}'
'\t{start}\t{end}\t0\t2\t{b1},{b2},\t{b1start},{b2start}\n')
return template.format(chr=chrom, start=left_coord[0], end=right_coord[1] + 1,
str=strand, b1=block1_len, b2=block2_len,
b1start=block1_start, b2start=block2_start)
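# Illustrative usage sketch (not part of the original module). An "ipair" is the
# tuple produced by get_inverted_pair above:
# (chrom, (left_start, left_end), (right_start, right_end), is_reverse).
# Assuming get_ucsc_name maps a reference name such as '1' to 'chr1', a call like
#
#     inverted_pair_to_bed12(('1', (1000, 1100), (1500, 1600), False))
#
# returns a single BED12 line with two blocks on the '+' strand, while overlapping
# coordinates fall back to two single-block lines.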
def write_inverted_pairs_bed(ipairs, fileprefix):
file = open(fileprefix + '.bed', 'w')
for ipair in ipairs:
file.write(inverted_pair_to_bed12(ipair))
file.close()
def write_inverted_pairs_bigbed(ipairs, fileprefix):
write_inverted_pairs_bed(ipairs, fileprefix)
os.system('sort -k1,1 -k2,2n {0}.bed > tmpsorted'.format(fileprefix))
os.system('mv tmpsorted {0}.bed'.format(fileprefix))
    os.system('bedToBigBed -type=bed12 {0}.bed '
              '/scratch/PI/whwong/svproject/reference/hg19.chrom.sizes {0}.bb'
.format(fileprefix))
|
75015
|
from __future__ import annotations
import typing
import toolcli
from ctc import evm
from ctc import spec
from ctc.cli import cli_utils
def get_command_spec() -> toolcli.CommandSpec:
return {
'f': async_events_command,
'help': 'get contract events',
'args': [
{
'name': 'contract',
'help': 'contract address of event',
},
{
'name': 'event',
'help': 'event name or event hash',
},
{
'name': '--blocks',
'help': 'block range',
'nargs': '+',
},
{
'name': '--output',
'default': 'stdout',
'help': 'file path for output (.json or .csv)',
},
{
'name': '--overwrite',
'action': 'store_true',
'help': 'specify that output path can be overwritten',
},
{
'name': '--verbose',
'help': 'display more event data',
'action': 'store_true',
},
],
'examples': [
'0x956f47f50a910163d8bf957cf5846d573e7f87ca Transfer',
'0x956f47f50a910163d8bf957cf5846d573e7f87ca Transfer --blocks [14000000, 14100000]',
],
}
async def async_events_command(
contract: str,
event: str,
blocks: typing.Sequence[str],
output: str,
overwrite: bool,
verbose: bool,
) -> None:
contract = await evm.async_resolve_address(contract)
if blocks is not None:
all_blocks = await cli_utils.async_resolve_block_range(blocks)
start_block = all_blocks[0]
end_block = all_blocks[-1]
else:
start_block = None
end_block = None
if event.startswith('0x'):
events: spec.DataFrame = await evm.async_get_events(
contract_address=contract,
start_block=start_block,
end_block=end_block,
verbose=False,
event_hash=event,
)
else:
events = await evm.async_get_events(
contract_address=contract,
start_block=start_block,
end_block=end_block,
verbose=False,
event_name=event,
)
if len(events) == 0:
print('[no events found]')
else:
# output
if not verbose:
events.index = typing.cast(
spec.PandasIndex,
[
str(value)
for value in events.index.get_level_values('block_number')
]
)
events.index.name = 'block'
events = events[
[column for column in events.columns if column.startswith('arg__')]
]
new_column_names = {
old_column: old_column[5:]
for old_column in events.columns
}
events = events.rename(columns=new_column_names)
cli_utils.output_data(events, output=output, overwrite=overwrite)
|
75025
|
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
import unittest
import nanopq
import numpy as np
class TestSuite(unittest.TestCase):
def setUp(self):
np.random.seed(123)
def test_property(self):
opq = nanopq.OPQ(M=4, Ks=256)
self.assertEqual(
(opq.M, opq.Ks, opq.verbose, opq.code_dtype),
(opq.pq.M, opq.pq.Ks, opq.pq.verbose, opq.pq.code_dtype),
)
def test_fit(self):
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq = nanopq.OPQ(M=M, Ks=Ks)
opq.fit(X)
self.assertEqual(opq.Ds, D / M)
self.assertEqual(opq.codewords.shape, (M, Ks, D / M))
self.assertEqual(opq.R.shape, (D, D))
opq2 = nanopq.OPQ(M=M, Ks=Ks).fit(X) # Can be called as a chain
self.assertTrue(np.allclose(opq.codewords, opq2.codewords))
def test_eq(self):
import copy
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq1 = nanopq.OPQ(M=M, Ks=Ks)
opq2 = nanopq.OPQ(M=M, Ks=Ks)
opq3 = copy.deepcopy(opq1)
opq4 = nanopq.OPQ(M=M, Ks=2 * Ks)
self.assertTrue(opq1 == opq1)
self.assertTrue(opq1 == opq2)
self.assertTrue(opq1 == opq3)
self.assertTrue(opq1 != opq4)
opq1.fit(X)
opq2.fit(X)
opq3 = copy.deepcopy(opq1)
opq4.fit(X)
self.assertTrue(opq1 == opq1)
self.assertTrue(opq1 == opq2)
self.assertTrue(opq1 == opq3)
self.assertTrue(opq1 != opq4)
def test_rotate(self):
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq = nanopq.OPQ(M=M, Ks=Ks)
opq.fit(X)
rotated_vec = opq.rotate(X[0])
rotated_vecs = opq.rotate(X[:3])
self.assertEqual(rotated_vec.shape, (D,))
self.assertEqual(rotated_vecs.shape, (3, D))
# Because R is a rotation matrix (R^t * R = I), R^t should be R^(-1)
self.assertAlmostEqual(
np.linalg.norm(opq.R.T - np.linalg.inv(opq.R)), 0.0, places=3
)
if __name__ == "__main__":
unittest.main()
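# Illustrative sketch (not part of the original test suite). The property checked in
# test_rotate above -- R^T ~= R^(-1) -- is equivalent to the learned rotation being
# (approximately) orthonormal, i.e. R^T @ R ~= I. A standalone check using only the
# `R` attribute exercised in the tests:
def check_rotation_is_orthonormal(opq, dim, atol=1e-3):
    # R^T @ R should be close to the identity matrix for a fitted OPQ rotation.
    return np.allclose(opq.R.T @ opq.R, np.eye(dim), atol=atol)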
|
75105
|
from typing import List
from unittest.case import TestCase
from uuid import uuid4
from eventsourcing.application import Application
from eventsourcing.persistence import Notification
from eventsourcing.system import (
AlwaysPull,
Follower,
Leader,
NeverPull,
ProcessApplication,
Promptable,
PullGaps,
System,
)
from eventsourcing.tests.test_application_with_popo import BankAccounts
from eventsourcing.tests.test_processapplication import EmailProcess
from eventsourcing.utils import get_topic
class TestSystem(TestCase):
def test_graph(self):
system = System(
pipes=[
[
BankAccounts,
EmailProcess,
],
[Application],
]
)
self.assertEqual(len(system.nodes), 3)
self.assertEqual(system.nodes["BankAccounts"], get_topic(BankAccounts))
self.assertEqual(system.nodes["EmailProcess"], get_topic(EmailProcess))
self.assertEqual(system.nodes["Application"], get_topic(Application))
self.assertEqual(system.leaders, ["BankAccounts"])
self.assertEqual(system.followers, ["EmailProcess"])
self.assertEqual(system.singles, ["Application"])
self.assertEqual(len(system.edges), 1)
self.assertIn(
(
"BankAccounts",
"EmailProcess",
),
system.edges,
)
self.assertEqual(len(system.singles), 1)
def test_raises_type_error_not_a_follower(self):
with self.assertRaises(TypeError) as cm:
System(
pipes=[
[
BankAccounts,
Leader,
],
]
)
exception = cm.exception
self.assertEqual(
exception.args[0],
"Not a follower class: <class 'eventsourcing.system.Leader'>",
)
def test_raises_type_error_not_a_processor(self):
with self.assertRaises(TypeError) as cm:
System(
pipes=[
[
BankAccounts,
Follower,
EmailProcess,
],
]
)
exception = cm.exception
self.assertEqual(
exception.args[0],
"Not a process application class: <class 'eventsourcing.system.Follower'>",
)
def test_is_leaders_only(self):
system = System(
pipes=[
[
Leader,
ProcessApplication,
ProcessApplication,
],
]
)
self.assertEqual(list(system.leaders_only), ["Leader"])
def test_leader_class(self):
system = System(
pipes=[
[
Application,
ProcessApplication,
ProcessApplication,
],
]
)
self.assertTrue(issubclass(system.leader_cls("Application"), Leader))
self.assertTrue(issubclass(system.leader_cls("ProcessApplication"), Leader))
class TestLeader(TestCase):
def test(self):
# Define fixture that receives prompts.
class FollowerFixture(Promptable):
def __init__(self):
self.num_prompts = 0
def receive_notifications(
self, leader_name: str, notifications: List[Notification]
) -> None:
self.num_prompts += 1
# Test fixture is working.
follower = FollowerFixture()
follower.receive_notifications("", [])
self.assertEqual(follower.num_prompts, 1)
# Construct leader.
leader = Leader()
leader.lead(follower)
# Check follower receives a prompt when there are new events.
leader.notify(
[
Notification(
id=1,
originator_id=uuid4(),
originator_version=0,
topic="topic1",
state=b"",
)
]
)
self.assertEqual(follower.num_prompts, 2)
# Check follower doesn't receive prompt when no new events.
leader.save()
self.assertEqual(follower.num_prompts, 2)
class TestPullMode(TestCase):
def test_always_pull(self):
mode = AlwaysPull()
self.assertTrue(mode.chose_to_pull(1, 1))
self.assertTrue(mode.chose_to_pull(2, 1))
def test_never_pull(self):
mode = NeverPull()
self.assertFalse(mode.chose_to_pull(1, 1))
self.assertFalse(mode.chose_to_pull(2, 1))
def test_pull_gaps(self):
mode = PullGaps()
self.assertFalse(mode.chose_to_pull(1, 1))
self.assertTrue(mode.chose_to_pull(2, 1))
|
75107
|
import os
import os.path as osp
import random as rd
import subprocess
from typing import Optional, Tuple, Union
import click
from mim.click import CustomCommand, param2lowercase
from mim.utils import (
echo_success,
exit_with_error,
get_installed_path,
highlighted_error,
is_installed,
module_full_name,
recursively_find,
)
@click.command(
name='train',
context_settings=dict(ignore_unknown_options=True),
cls=CustomCommand)
@click.argument('package', type=str, callback=param2lowercase)
@click.argument('config', type=str)
@click.option(
'-l',
'--launcher',
type=click.Choice(['none', 'pytorch', 'slurm'], case_sensitive=False),
default='none',
help='Job launcher')
@click.option(
'--port',
type=int,
default=None,
help=('The port used for inter-process communication (only applicable to '
'slurm / pytorch launchers). If set to None, will randomly choose '
'a port between 20000 and 30000. '))
@click.option(
'-G', '--gpus', type=int, default=1, help='Number of gpus to use')
@click.option(
'-g',
'--gpus-per-node',
type=int,
help=('Number of gpus per node to use '
'(only applicable to launcher == "slurm")'))
@click.option(
'-c',
'--cpus-per-task',
type=int,
default=2,
help='Number of cpus per task (only applicable to launcher == "slurm")')
@click.option(
'-p',
'--partition',
type=str,
help='The partition to use (only applicable to launcher == "slurm")')
@click.option(
'--srun-args', type=str, help='Other srun arguments that might be used')
@click.option('-y', '--yes', is_flag=True, help='Don’t ask for confirmation.')
@click.argument('other_args', nargs=-1, type=click.UNPROCESSED)
def cli(package: str,
config: str,
gpus: int,
gpus_per_node: int,
partition: str,
cpus_per_task: int = 2,
launcher: str = 'none',
port: int = None,
srun_args: Optional[str] = None,
yes: bool = False,
other_args: tuple = ()) -> None:
"""Perform Training.
Example:
\b
# Train models on a single server with CPU by setting `gpus` to 0 and
# 'launcher' to 'none' (if applicable). The training script of the
# corresponding codebase will fail if it doesn't support CPU training.
> mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 0
# Train models on a single server with one GPU
> mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 1
# Train models on a single server with 4 GPUs and pytorch distributed
> mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 4 \
--launcher pytorch
# Train models on a slurm HPC with one 8-GPU node
> mim train mmcls resnet101_b16x8_cifar10.py --launcher slurm --gpus 8 \
--gpus-per-node 8 --partition partition_name --work-dir tmp
# Print help messages of sub-command train
> mim train -h
# Print help messages of sub-command train and the training script of mmcls
> mim train mmcls -h
"""
is_success, msg = train(
package=package,
config=config,
gpus=gpus,
gpus_per_node=gpus_per_node,
cpus_per_task=cpus_per_task,
partition=partition,
launcher=launcher,
port=port,
srun_args=srun_args,
yes=yes,
other_args=other_args)
if is_success:
echo_success(msg) # type: ignore
else:
exit_with_error(msg)
def train(
package: str,
config: str,
gpus: int,
gpus_per_node: int = None,
cpus_per_task: int = 2,
partition: str = None,
launcher: str = 'none',
port: int = None,
srun_args: Optional[str] = None,
yes: bool = True,
other_args: tuple = ()
) -> Tuple[bool, Union[str, Exception]]:
"""Train a model with given config.
Args:
package (str): The codebase name.
config (str): The config file path. If not exists, will search in the
config files of the codebase.
gpus (int): Number of gpus used for training.
gpus_per_node (int, optional): Number of gpus per node to use
(only applicable to launcher == "slurm"). Defaults to None.
cpus_per_task (int, optional): Number of cpus per task to use
            (only applicable to launcher == "slurm"). Defaults to 2.
partition (str, optional): The partition name
(only applicable to launcher == "slurm"). Defaults to None.
launcher (str, optional): The launcher used to launch jobs.
Defaults to 'none'.
port (int | None, optional): The port used for inter-process
communication (only applicable to slurm / pytorch launchers).
            Defaults to None. If set to None, will randomly choose a port
between 20000 and 30000.
srun_args (str, optional): Other srun arguments that might be
used, all arguments should be in a string. Defaults to None.
yes (bool): Don’t ask for confirmation. Default: True.
other_args (tuple, optional): Other arguments, will be passed to the
codebase's training script. Defaults to ().
"""
full_name = module_full_name(package)
if full_name == '':
msg = f"Can't determine a unique package given abbreviation {package}"
raise ValueError(highlighted_error(msg))
package = full_name
# If launcher == "slurm", must have following args
if launcher == 'slurm':
msg = ('If launcher is slurm, '
'gpus-per-node and partition should not be None')
flag = (gpus_per_node is not None) and (partition is not None)
assert flag, msg
if port is None:
port = rd.randint(20000, 30000)
if launcher in ['slurm', 'pytorch']:
click.echo(f'Using port {port} for synchronization. ')
if not is_installed(package):
msg = (f'The codebase {package} is not installed, '
'do you want to install the latest release? ')
if yes or click.confirm(msg):
click.echo(f'Installing {package}')
cmd = ['mim', 'install', package]
ret = subprocess.check_call(cmd)
if ret != 0:
msg = f'{package} is not successfully installed'
raise RuntimeError(highlighted_error(msg))
else:
click.echo(f'{package} is successfully installed')
else:
msg = f'You can not train this model without {package} installed.'
return False, msg
pkg_root = get_installed_path(package)
if not osp.exists(config):
# configs is put in pkg/.mim in PR #68
config_root = osp.join(pkg_root, '.mim', 'configs')
if not osp.exists(config_root):
# If not pkg/.mim/config, try to search the whole pkg root.
config_root = pkg_root
# pkg/.mim/configs is a symbolic link to the real config folder,
# so we need to follow links.
files = recursively_find(
pkg_root, osp.basename(config), followlinks=True)
if len(files) == 0:
msg = (f"The path {config} doesn't exist and we can not find "
f'the config file in codebase {package}.')
raise ValueError(highlighted_error(msg))
elif len(files) > 1:
msg = (
f"The path {config} doesn't exist and we find multiple "
f'config files with same name in codebase {package}: {files}.')
raise ValueError(highlighted_error(msg))
# Use realpath instead of the symbolic path in pkg/.mim
config_path = osp.realpath(files[0])
click.echo(
f"The path {config} doesn't exist but we find the config file "
f'in codebase {package}, will use {config_path} instead.')
config = config_path
# tools will be put in package/.mim in PR #68
train_script = osp.join(pkg_root, '.mim', 'tools', 'train.py')
if not osp.exists(train_script):
train_script = osp.join(pkg_root, 'tools', 'train.py')
common_args = ['--launcher', launcher] + list(other_args)
if launcher == 'none':
if gpus:
cmd = ['python', train_script, config, '--gpus',
str(gpus)] + common_args
else:
cmd = ['python', train_script, config, '--device', 'cpu'
] + common_args
elif launcher == 'pytorch':
cmd = [
'python', '-m', 'torch.distributed.launch',
f'--nproc_per_node={gpus}', f'--master_port={port}', train_script,
config
] + common_args
elif launcher == 'slurm':
parsed_srun_args = srun_args.split() if srun_args else []
has_job_name = any([('--job-name' in x) or ('-J' in x)
for x in parsed_srun_args])
if not has_job_name:
job_name = osp.splitext(osp.basename(config))[0]
parsed_srun_args.append(f'--job-name={job_name}_train')
cmd = [
'srun', '-p', f'{partition}', f'--gres=gpu:{gpus_per_node}',
f'--ntasks={gpus}', f'--ntasks-per-node={gpus_per_node}',
f'--cpus-per-task={cpus_per_task}', '--kill-on-bad-exit=1'
] + parsed_srun_args + ['python', '-u', train_script, config
] + common_args
cmd_text = ' '.join(cmd)
click.echo(f'Training command is {cmd_text}. ')
ret = subprocess.check_call(
cmd, env=dict(os.environ, MASTER_PORT=str(port)))
if ret == 0:
return True, 'Training finished successfully. '
else:
return False, 'Training not finished successfully. '
|
75108
|
import asyncio
import logging
from timeit import default_timer as timer
from podping_hivewriter.async_context import AsyncContext
from podping_hivewriter.models.podping_settings import PodpingSettings
from podping_hivewriter.podping_settings import get_podping_settings
from pydantic import ValidationError
class PodpingSettingsManager(AsyncContext):
def __init__(self, ignore_updates=False):
super().__init__()
self.ignore_updates = ignore_updates
self.last_update_time = float("-inf")
self._settings = PodpingSettings()
self._settings_lock = asyncio.Lock()
self._startup_done = False
asyncio.ensure_future(self._startup())
async def _startup(self):
if not self.ignore_updates:
self._add_task(asyncio.create_task(self._update_podping_settings_loop()))
self._startup_done = True
async def _update_podping_settings_loop(self):
while True:
try:
await self.update_podping_settings()
await asyncio.sleep(self._settings.control_account_check_period)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                logging.error(e, exc_info=True)
async def update_podping_settings(self) -> None:
try:
podping_settings = await get_podping_settings(
self._settings.control_account
)
self.last_update_time = timer()
except ValidationError as e:
logging.warning(f"Problem with podping control settings: {e}")
else:
if self._settings != podping_settings:
logging.debug(
f"Configuration override from Podping Hive: {podping_settings}"
)
async with self._settings_lock:
self._settings = podping_settings
async def get_settings(self) -> PodpingSettings:
async with self._settings_lock:
return self._settings
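# Illustrative usage sketch (not part of the original module). PodpingSettingsManager
# must be constructed inside a running asyncio event loop, because __init__ schedules
# its background refresh task with asyncio.ensure_future.
async def example_get_control_account() -> str:
    # ignore_updates=True skips the background refresh loop, so only the defaults
    # from PodpingSettings() are returned.
    manager = PodpingSettingsManager(ignore_updates=True)
    settings = await manager.get_settings()
    return settings.control_account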
|
75131
|
from operator import itemgetter
from collections import defaultdict
import scipy.stats as st
import numpy as np
import pandas as pd
def k_factor(margin_of_victory, elo_diff):
init_k = 20
if margin_of_victory>0:
multiplier = (margin_of_victory+3) ** (0.8) / (7.5 + 0.006 * (elo_diff))
else:
multiplier = (-margin_of_victory+3)** (0.8) / (7.5 + 0.006 *(-elo_diff))
return init_k*multiplier,init_k*multiplier
def s_value(home_score, away_score):
S_home,S_away=0,0
if home_score > away_score:
S_home = 1
elif away_score > home_score:
S_away = 1
else:
S_home,S_away=.5,.5
return S_home,S_away
def elo_update(home_score, away_score, home_rating,away_rating, home_advantage = 100.):
home_rating += home_advantage
elo_home = elo_prediction(home_rating,away_rating)
elo_away = 1 - elo_home
elo_diff = home_rating - away_rating
MOV = home_score - away_score
s_home,s_away = s_value(home_score,away_score)
    # both branches called k_factor identically, so no conditional is needed
    K_home, K_away = k_factor(MOV, elo_diff)
return K_home*(s_home-elo_home),K_away*(s_away-elo_away)
def elo_prediction(home_rating,away_rating):
return 1./(1 + 10 ** ((away_rating - home_rating) / (400.)))
def score_prediction(home_rating,away_rating):
return (home_rating-away_rating)/28.
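# Illustrative worked example (not part of the original script). For a 1500-rated
# home team beating a 1450-rated visitor 102-95, elo_prediction (with the +100
# home-court bump that elo_update applies internally) gives the home win
# probability, and elo_update returns the rating deltas for home and away:
#
#     p_home = elo_prediction(1500 + 100, 1450)         # ~0.70
#     d_home, d_away = elo_update(102, 95, 1500, 1450)  # d_home ~ +4.5, d_away ~ -4.5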
class EloSimulation(object):
def __init__(self, games, update_function, label_dict, end_of_season_correction, prediction_function=None,):
self.update_function = update_function
self.games = games
self.ratings = {}
self.prediction_function = prediction_function
self.predictions = []
self.curr_season = defaultdict(lambda: self.games[0][1][label_dict['year_id']])
self.end_of_season_correction = end_of_season_correction
def train(self):
for idx, game in self.games:
new_year = game[label_dict['year_id']]
label_i = game[label_dict['fran_id']]
label_j = game[label_dict['opp_fran']]
if self.ratings.get(label_i, False ) == False:
self.ratings[label_i] = elo_lookup(label_i,game[label_dict['gameorder']])
if self.ratings.get(label_j,False )== False:
self.ratings[label_j] = elo_lookup(label_j,game[label_dict['gameorder']])
if self.curr_season[label_i]!=new_year:
self.curr_season[label_i]=new_year
self.ratings[label_i]=self.ratings[label_i]*.6+1505.*.4
elif self.curr_season[label_j]!=new_year:
self.curr_season[label_j]=new_year
self.ratings[label_j]=self.ratings[label_j]*.6+1505.*.4
self.predictions.append(elo_prediction(self.ratings[label_i]+100, self.ratings[label_j]))
#todo change below to just use event
update=self.update_function(game[label_dict['pts']],game[label_dict['opp_pts']], self.ratings[label_i], self.ratings[label_j])
self.ratings[label_i]+=update[0]
self.ratings[label_j]+=update[1]
def power_rankings(self):
power_rankings = sorted(self.ratings.items(), key=itemgetter(1), reverse=True)
power = []
for i, x in enumerate(power_rankings):
power.append((i + 1, x))
return power
label_dict = {
'year_id' :'year_id',
'fran_id' :'fran_id',
'opp_fran' :'opp_fran',
'gameorder' :'gameorder',
'pts' :"pts",
'opp_pts' :"opp_pts"
}
full_df = pd.read_csv("../../elo-simulations/nbaallelo.csv")
games=full_df[full_df['game_location']=='H']  # remove duplicated rows to work with our elo implementation
games['SEASON']=games['year_id'].apply(lambda x: "%s-%s"%(x-1,x))
STARTING_LOC=0
def elo_lookup(fran_id,gameorder):
return full_df[(full_df['fran_id']==fran_id)&(full_df['gameorder']>=gameorder)]['elo_i'].iloc[0]
m = EloSimulation(
games = list(games[games['gameorder']>STARTING_LOC].iterrows()),
update_function = elo_update,
prediction_function = elo_prediction,
label_dict = label_dict,
end_of_season_correction = 1)
m.train()
m.power_rankings()
games['prediction']=m.predictions
games['predictedWinner']=games['prediction'].apply(lambda x: 1 if x>=.5 else 0)
games['winner']=games.apply(lambda x: x['pts']>=x['opp_pts'],axis=1)
from sklearn.metrics import confusion_matrix
conf_matrix=confusion_matrix(games['winner'],games['predictedWinner'])
top = float(conf_matrix[0][0]+conf_matrix[1][1])
bottom = top + float(conf_matrix[0][1] + conf_matrix[1][0])
print(top/bottom * 100)
|
75231
|
from collections.abc import (
AsyncIterator,
)
from typing import (
Any,
)
from ...database import (
DatabaseClient,
)
from .operations import (
MockedDatabaseOperation,
)
class MockedDatabaseClient(DatabaseClient):
"""For testing purposes"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.kwargs = kwargs
self._response = tuple()
async def _reset(self, **kwargs) -> None:
"""For testing purposes"""
self._response = tuple()
async def _execute(self, operation: MockedDatabaseOperation) -> None:
"""For testing purposes"""
self._response = operation.response
async def _fetch_all(self, *args, **kwargs) -> AsyncIterator[Any]:
"""For testing purposes"""
for value in self._response:
yield value
|
75239
|
import torch
import numpy as np
import shutil
import os
from data import ljspeech
import hparams as hp
def preprocess_ljspeech(filename):
in_dir = filename
out_dir = hp.mel_ground_truth
if not os.path.exists(out_dir):
os.makedirs(out_dir, exist_ok=True)
metadata = ljspeech.build_from_path(in_dir, out_dir)
write_metadata(metadata, out_dir)
shutil.move(os.path.join(hp.mel_ground_truth, "train.txt"),
os.path.join("data", "train.txt"))
def write_metadata(metadata, out_dir):
with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f:
for m in metadata:
f.write(m + '\n')
def main():
path = os.path.join("data", "LJSpeech-1.1")
preprocess_ljspeech(path)
if __name__ == "__main__":
main()
|
75240
|
from django.core.management.base import BaseCommand, CommandError
from canvas.thumbnailer import update_all_content
from canvas.models import Content
from canvas.upload import get_fs
from configuration import Config
from django.conf import settings
class Command(BaseCommand):
args = ''
help = 'Recreates all the thumbnails.'
def handle(self, *args, **options):
update_all_content(get_fs(*settings.IMAGE_FS), *args)
|
75256
|
from django.apps import AppConfig
class ProfileConfig(AppConfig):
label = "profile"
name = "edd.profile"
verbose_name = "User Profiles"
|
75275
|
import h5py
import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float16')
x_test = x_test.astype('float16')
inputs = np.concatenate((x_train,x_test)) / 255
labels = np.concatenate((y_train,y_test)) # ints, 0 to 9
###########################################
# fix mis-labeled image(s) in Keras dataset
labels[10994] = 9
###########################################
targets = to_categorical(labels).astype("uint8")
string = h5py.special_dtype(vlen=str)
labels = np.array([str(label) for label in labels], dtype=string)
print("creating h5...")
with h5py.File("mnist.h5", "w") as h5:
dset = h5.create_dataset('inputs', data=[inputs], compression='gzip', compression_opts=9)
dset = h5.create_dataset('targets', data=[targets], compression='gzip', compression_opts=9)
dset = h5.create_dataset('labels', data=[labels], compression='gzip', compression_opts=9)
print("done!")
|
75334
|
class Model(object):
def __init__(self,configModel,utils,strTrial):
self.tag = configModel[0]
self.mode = configModel[1]
self.featureSet = configModel[2]
self.misc = configModel[3]
self.masterTest = utils.TEST_IDS_PATH
self.bootTrain = utils.MODEL_BOOT_PATH + \
'train' + '_t' + strTrial
self.bootCV = utils.MODEL_BOOT_PATH + \
'CV' + '_t' + strTrial
self.bootTest = utils.MODEL_BOOT_PATH + \
'test' + '_t' + strTrial
self.featTrain = utils.MODEL_FEATURED_PATH + self.tag + \
'_train' + '_t' + strTrial
self.featCV = utils.MODEL_FEATURED_PATH + self.tag + \
'_CV' + '_t' + strTrial
self.featTest = utils.MODEL_FEATURED_PATH + self.tag + \
'_test' + '_t' + strTrial
self.tmpTrain = utils.MODEL_TMP_PATH + self.tag + \
'_train' + '_t' + strTrial
self.tmpCV = utils.MODEL_TMP_PATH + self.tag + \
'_CV' + '_t' + strTrial
self.tmpTest = utils.MODEL_TMP_PATH + self.tag + \
'_test'+ '_t' + strTrial
self.runTrain = utils.MODEL_RUN_PATH + self.tag + \
'_train' + '_t' + strTrial
self.runCV = utils.MODEL_RUN_PATH + self.tag + \
'_CV' + '_t' + strTrial
self.runTest = utils.MODEL_RUN_PATH + self.tag + \
'_test' + '_t' + strTrial
self.predCV = utils.MODEL_PREDICT_PATH + self.tag + \
'_CV'+ '_t' + strTrial
self.predTest = utils.MODEL_PREDICT_PATH + self.tag + \
'_test' + '_t' + strTrial
self.predCVTmp = self.predCV + '_tmp'
self.predTestTmp= self.predTest + '_tmp'
self.trial = strTrial
self.movieTagPath = utils.MOVIE_TAG_PATH
self.userSocialPath = utils.USER_SOCIAL_PATH
self.userHistoryPath= utils.USER_HISTORY_PATH
def prependUserMovieToPredictions(self,idsPath,fixPath,savePath):
        ### Takes in user and movie id's through idsPath
        ### Takes in a column of predicted ratings through fixPath
        ### Writes tab-separated user, movie, rating lines to savePath
import csv
data = csv.reader(open(idsPath,'rU'), delimiter="\t", quotechar='|')
fixData = open(fixPath, 'rU')
fixLines = fixData.readlines()
i = 0
output = []
for row in data :
output.append(row[0] + '\t' + row[1] + "\t" + fixLines[i])
i = i + 1
outfile = open(savePath, 'w')
outfile.writelines(["%s" % item for item in output])
def fixRun(self):
self.prependUserMovieToPredictions(self.masterTest,
self.predTestTmp,
self.predTest)
self.prependUserMovieToPredictions(self.bootCV,
self.predCVTmp,
self.predCV)
|
75362
|
from moncash import constants
class Environment(object):
def __init__(self, name, host):
self.__name__ = name
self.host = host
self.protocol = "https://"
if name == 'Sandbox':
self.redirect_url = constants.SANDBOX_REDIRECT_URL
elif name == 'Production':
self.redirect_url = constants.PROD_REDIRECT_URL
else:
raise EnvironmentError("Environment should be named 'Sandbox' or 'Production'")
Sandbox = Environment('Sandbox', constants.SANDBOX_HOST)
Production = Environment('Production', constants.PROD_HOST)
|
75370
|
import torch
import mmocr.utils as utils
from mmocr.models.builder import CONVERTORS
from .base import BaseConvertor
import numpy as np
@CONVERTORS.register_module()
class MasterConvertor(BaseConvertor):
"""Convert between text, index and tensor for encoder-decoder based
pipeline.
Args:
dict_type (str): Type of dict, should be one of {'DICT36', 'DICT90'}.
dict_file (None|str): Character dict file path. If not none,
higher priority than dict_type.
dict_list (None|list[str]): Character list. If not none, higher
priority than dict_type, but lower than dict_file.
with_unknown (bool): If True, add `UKN` token to class.
max_seq_len (int): Maximum sequence length of label.
lower (bool): If True, convert original string to lower case.
start_end_same (bool): Whether use the same index for
start and end token or not. Default: True.
"""
def __init__(self,
dict_type='DICT90',
dict_file=None,
dict_list=None,
with_unknown=True,
max_seq_len=40,
lower=False,
start_end_same=True,
**kwargs):
super().__init__(dict_type, dict_file, dict_list)
assert isinstance(with_unknown, bool)
assert isinstance(max_seq_len, int)
assert isinstance(lower, bool)
self.with_unknown = with_unknown
self.max_seq_len = max_seq_len
self.lower = lower
self.start_end_same = start_end_same
self.update_dict()
def update_dict(self):
start_token = '<SOS>'
end_token = '<EOS>'
unknown_token = '<UKN>'
padding_token = '<PAD>'
# unknown
self.unknown_idx = None
if self.with_unknown:
self.idx2char.append(unknown_token)
self.unknown_idx = len(self.idx2char) - 1
# SOS/EOS
self.idx2char.append(start_token)
self.start_idx = len(self.idx2char) - 1
if not self.start_end_same:
self.idx2char.append(end_token)
self.end_idx = len(self.idx2char) - 1
# padding
self.idx2char.append(padding_token)
self.padding_idx = len(self.idx2char) - 1
# update char2idx
self.char2idx = {}
for idx, char in enumerate(self.idx2char):
self.char2idx[char] = idx
def str2tensor(self, strings):
"""
Convert text-string into tensor.
Args:
strings (list[str]): ['hello', 'world']
Returns:
dict (str: Tensor | list[tensor]):
tensors (list[Tensor]): [torch.Tensor([1,2,3,3,4]),
torch.Tensor([5,4,6,3,7])]
padded_targets (Tensor(bsz * max_seq_len))
"""
# ordinary OCR task strings is list of str, but table master is list of list.
assert utils.is_type_list(strings, str) or utils.is_type_list(strings, list)
tensors, padded_targets = [], []
indexes = self.str2idx(strings)
for index in indexes:
tensor = torch.LongTensor(index)
tensors.append(tensor)
# target tensor for loss
src_target = torch.LongTensor(tensor.size(0) + 2).fill_(0)
src_target[-1] = self.end_idx
src_target[0] = self.start_idx
src_target[1:-1] = tensor
padded_target = (torch.ones(self.max_seq_len) *
self.padding_idx).long()
char_num = src_target.size(0)
if char_num > self.max_seq_len:
                # TODO: sequences longer than max_seq_len - 2 should be skipped
padded_target = src_target[:self.max_seq_len]
else:
                # TODO: here the last token is the PAD token rather than EOS, unlike FASTOCR, where the last token is EOS.
padded_target[:char_num] = src_target
padded_targets.append(padded_target)
padded_targets = torch.stack(padded_targets, 0).long()
return {'targets': tensors, 'padded_targets': padded_targets}
def tensor2idx(self, outputs, img_metas=None):
"""
Convert output tensor to text-index
Args:
outputs (tensor): model outputs with size: N * T * C
img_metas (list[dict]): Each dict contains one image info.
Returns:
indexes (list[list[int]]): [[1,2,3,3,4], [5,4,6,3,7]]
scores (list[list[float]]): [[0.9,0.8,0.95,0.97,0.94],
[0.9,0.9,0.98,0.97,0.96]]
"""
batch_size = outputs.size(0)
ignore_indexes = [self.padding_idx]
indexes, scores = [], []
for idx in range(batch_size):
seq = outputs[idx, :, :]
seq = seq.softmax(-1)
max_value, max_idx = torch.max(seq, -1)
str_index, str_score = [], []
output_index = max_idx.cpu().detach().numpy().tolist()
output_score = max_value.cpu().detach().numpy().tolist()
for char_index, char_score in zip(output_index, output_score):
if char_index in ignore_indexes:
continue
if char_index == self.end_idx:
break
str_index.append(char_index)
str_score.append(char_score)
indexes.append(str_index)
scores.append(str_score)
return indexes, scores
|
75422
|
from rply.token import BaseBox
from vython.errors import error, Errors as errors
import sys
class BinaryOp(BaseBox):
def __init__(self, left, right):
self.left = left
self.right = right
if self.right.kind == "string" or self.left.kind == "string":
self.kind = "string"
else:
self.kind = self.left.kind
class Sum(BinaryOp):
def eval(self):
try:
return self.left.eval() + self.right.eval()
except:
try:
return self.left.sum(self.right)
except:
error(
errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Addition",
"values": [self.left.eval(),
self.right.eval()],
"types":
[self.left.kind.tostr(),
self.right.kind.tostr()]
})
sys.exit(1)
class Sub(BinaryOp):
def eval(self):
try:
return self.left.eval() - self.right.eval()
except:
try:
return self.left.sub(self.right)
except:
error(
errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Substraction",
"values": [self.left.eval(),
self.right.eval()],
"types":
[self.left.kind.tostr(),
self.right.kind.tostr()]
})
sys.exit(1)
class Mul(BinaryOp):
def eval(self):
try:
return self.left.eval() * self.right.eval()
except:
try:
return self.left.mul(self.right)
except:
error(
errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Multiplication",
"values": [self.left.eval(),
self.right.eval()],
"types":
[self.left.kind.tostr(),
self.right.kind.tostr()]
})
sys.exit(1)
class Div(BinaryOp):
def eval(self):
try:
return self.left.eval() / self.right.eval()
except:
try:
return self.left.div(self.right)
except:
error(
errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Division",
"values": [self.left.eval(),
self.right.eval()],
"types":
[self.left.kind.tostr(),
self.right.kind.tostr()]
})
sys.exit(1)
class DivEu(BinaryOp):
def eval(self):
try:
return self.left.eval() // self.right.eval()
except:
try:
return self.left.diveu(self.right)
except:
error(
errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Euclidean Division",
"values": [self.left.eval(),
self.right.eval()],
"types":
[self.left.kind.tostr(),
self.right.kind.tostr()]
})
sys.exit(1)
class Pow(BinaryOp):
def eval(self):
try:
return self.left.eval()**self.right.eval()
except:
try:
return self.left.pow(self.right)
except:
error(
errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Power",
"values": [self.left.eval(),
self.right.eval()],
"types":
[self.left.kind.tostr(),
self.right.kind.tostr()]
})
sys.exit(1)
class Mod(BinaryOp):
def eval(self):
try:
return self.left.eval() % self.right.eval()
except:
try:
return self.left.mod(self.right)
except:
error(
errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Modulo",
"values": [self.left.eval(),
self.right.eval()],
"types":
[self.left.kind.tostr(),
self.right.kind.tostr()]
})
sys.exit(1)
|
75446
|
import sys
from time import time
class Progress:
"""
"""
def __init__(self, iterable, size = None, interval = 0.1):
"""
Args:
iterable
size (int): max size of iterable
            interval (float): minimum number of seconds between bar updates, default is `0.1`
Attrs:
BAR_LENGTH (int): bar length, default is `32`
            SYMBOL_DONE (str): symbol indicating completion
SYMBOL_REST (str): symbol indicating remaining
prefix (str): string template before progress bar
suffix (str): string template after progress bar
template (str): string template for rendering, `{prefix} {bar} {suffix}`
"""
self.iterable = iterable
self.interval = interval
self.batch = 1
self.size = size
if hasattr(iterable, '__len__'):
self.size = len(iterable)
# is pytorch dataloader
if hasattr(iterable, 'batch_size'):
self.batch = getattr(iterable, 'batch_size')
self.size = len(iterable.dataset)
self.idx = 0
self.time = None
self.BAR_LENGTH = 32
self.SYMBOL_DONE = '█'
self.SYMBOL_REST = '.'
self.prefix = ""
self.suffix = ""
if self.size is None:
self.template = "{prefix} {done} iters {time:.2f}s {suffix}"
else:
self.template = "{prefix} {percent:3.0f}%|{bar}| [{done}/{size}] {time:.2f}s {suffix}"
def __len__(self):
return self.size
def __iter__(self):
self.reset()
# reset time
start = time()
last_time = start
for item in self.iterable:
yield item
self.idx += 1
curr_time = time()
self.time = curr_time - start
# skip update if delta is too small
if curr_time - last_time < self.interval:
continue
last_time = curr_time
# update bar
self.flush()
        # final update to reflect the finished state
self.flush()
self.end()
def reset(self):
# reset index
self.idx = 0
def end(self):
self.print('\n')
def flush(self):
if self.size is None:
done = self.idx * self.batch
percent = 0
bar = None
else:
done = min(self.idx * self.batch, self.size)
percent = done / self.size
bar = (self.SYMBOL_DONE * int(percent * self.BAR_LENGTH)).ljust(self.BAR_LENGTH, self.SYMBOL_REST)
self.print('\r' + self.template.format(
percent = percent * 100,
bar = bar,
done = done,
size = self.size,
time = self.time,
tps = done / self.time,
prefix = self.prefix,
suffix = self.suffix,
))
def print(self, text):
sys.stdout.write(text)
sys.stdout.flush()
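# Illustrative usage sketch (not part of the original module):
def demo_progress():
    # Wrap any iterable; the bar refreshes at most once every `interval` seconds.
    import time as _time
    total = 0
    for item in Progress(range(100), interval=0.5):
        _time.sleep(0.01)  # simulate some work per item
        total += item
    return total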
|
75471
|
from ariadne import QueryType
from neo4j_graphql_py import neo4j_graphql
query = QueryType()
@query.field('Movie')
@query.field('MoviesByYear')
def resolve(obj, info, **kwargs):
return neo4j_graphql(obj, info.context, info, **kwargs)
|
75473
|
def set_fill_color(red, green, blue):
pass
def draw_rectangle(corner, other_corner):
pass
set_fill_color(red=161, green=219, blue=114)
draw_rectangle(corner=(105,20), other_corner=(60,60))
|
75545
|
import pytest
import unittest
from pydu.dict import AttrDict, LookupDict, CaseInsensitiveDict, OrderedDefaultDict, attrify
class TestAttrDict:
def test_attr_access_with_init(self):
d = AttrDict(key=1)
assert d['key'] == 1
assert d.key == 1
def test_attr_access_without_init(self):
d = AttrDict()
d['key'] = 1
assert d['key'] == 1
assert d.key == 1
d.anotherkey = 1
assert d.anotherkey == 1
assert d['anotherkey'] == 1
def test_attr_delete(self):
d = AttrDict(key=1)
del d.key
with pytest.raises(AttributeError):
del d.key
def test_repr(self):
d = AttrDict()
assert repr(d) == '<AttrDict {}>'
class TestLookupDict:
def test_key_exist(self):
d = LookupDict()
d['key'] = 1
assert d['key'] == 1
def test_key_not_exist(self):
d = LookupDict()
assert d['key'] is None
class TestCaseInsensitiveDict(unittest.TestCase):
def setUp(self):
self.d = CaseInsensitiveDict()
self.d['Accept'] = 1
def test_ci_dict_set(self):
assert self.d['aCCept'] == 1
assert list(self.d) == ['Accept']
def test_ci_dict_del(self):
del self.d['accept']
assert not self.d
def test_ci_dict_copy_and_equal(self):
d = self.d.copy()
assert d == self.d
class TestOrderedDefaultDict:
def test_default_normal(self):
d = OrderedDefaultDict(int)
assert d[1] == 0
assert d['a'] == 0
d[2] = 2
assert d[2] == 2
assert list(d.keys()) == [1, 'a', 2]
d = OrderedDefaultDict(int, a=1)
assert d['a'] == 1
def test_default_factory_not_callable(self):
with pytest.raises(TypeError):
OrderedDefaultDict('notcallable')
def test_default_factory_none(self):
d = OrderedDefaultDict()
with pytest.raises(KeyError):
d[1]
def test_copy(self):
d1 = OrderedDefaultDict(int, a=[])
d2 = d1.copy()
assert d2['a'] == []
d1['a'].append(1)
assert d2['a'] == [1]
def test_deepcopy(self):
import copy
d1 = OrderedDefaultDict(int, a=[])
d2 = copy.deepcopy(d1)
assert d2['a'] == []
d1['a'].append(1)
assert d2['a'] == []
def test_repr(self):
d = OrderedDefaultDict(int, a=1)
assert repr(d).startswith('OrderedDefaultDict')
def test_attrify():
attrd = attrify({
'a': [1, 2, {'b': 'b'}],
'c': 'c',
})
assert attrd.a == [1, 2, {'b': 'b'}]
assert attrd.a[2].b == 'b'
assert attrd.c == 'c'
attrd = attrify((1, 2))
assert attrd == (1, 2)
attrd = attrify({
'a': 1,
'b': (1, 2)
})
assert attrd.a == 1
assert attrd.b == (1, 2)
|
75576
|
import operator
from django.db.models import Q as DjangoQ
from django.db.models.lookups import Lookup
from ..ql import Q as SearchQ
def resolve_filter_value(v):
"""Resolve a filter value to one that the search API can handle
We can't pass model instances for example.
"""
return getattr(v, 'pk', v)
class SearchQueryAdapter(object):
"""Adapter class to wrap a `search.query.SearchQuery` instance to behaves
like a Django queryset.
We only implement 'enough' to allow it's use within a rest_framework
viewset and django_filter Filterset.
"""
def __init__(self, query=None, model=None, queryset=None, _is_none=False, ids_only=False):
self._is_none = _is_none
self._query = query
self._queryset = queryset
self.ids_only = ids_only
self.model = None if queryset is None else queryset.model
@classmethod
def from_queryset(cls, queryset, ids_only=False):
"""Construct a query adapter from a Django queryset"""
if isinstance(queryset, cls):
return queryset
filters = (
{} if queryset.query.is_empty() else
cls.get_filters_from_queryset(queryset)
)
search_query = cls.filters_to_search_query(
filters,
queryset.model,
ids_only=ids_only
)
return cls(
search_query,
queryset=queryset,
ids_only=ids_only,
_is_none=queryset.query.is_empty()
)
@classmethod
def filters_to_search_query(cls, filters, model, query=None, ids_only=False):
"""Convert a list of nested lookups filters (a result of
get_filters_from_queryset) into a SearchQuery objects.
"""
from .utils import get_search_query
search_query = query or get_search_query(model, ids_only=ids_only)
if not filters:
return search_query
connector = filters['connector']
children = filters['children']
q_objects = None
for child in children:
if isinstance(child, tuple):
q = SearchQ(
**{
"{}__{}".format(child[0], child[1]): child[2]
}
)
                operator_func = getattr(operator, connector.lower() + '_', operator.and_)
q_objects = operator_func(q_objects, q) if q_objects else q
else:
search_query = cls.filters_to_search_query(child, model, query=search_query)
if q_objects is not None:
search_query.query.add_q(q_objects, conn=connector.upper())
return search_query
@classmethod
def get_filters_from_queryset(cls, queryset, where_node=None):
"""Translates django queryset filters into a nested dict of tuples
Example:
>>> queryset = (
Profile.objects
.filter(given_name='pete')
.filter(Q(email='<EMAIL>') | Q(email='<EMAIL>'))
)
>>> get_filters_from_queryset(queryset)
{
u'children': [
(u'given_name', u'exact', 'pete'),
{
u'children': [
(u'email', u'exact', '<EMAIL>'),
(u'email', u'exact', '<EMAIL>')
],
u'connector': u'OR'
}
],
u'connector': u'AND'
}
"""
where_node = where_node or queryset.query.where
children = []
node_filters = {
u'connector': unicode(where_node.connector),
}
for node in where_node.children:
# Normalize expressions which are an AND with a single child and
# use the child node as the expression instead. This happens if you
# add whole querysets together.
if getattr(node, 'connector', None) == 'AND' and len(node.children) == 1:
node = node.children[0]
if isinstance(node, Lookup): # Lookup
children.append(cls.normalize_lookup(node))
else: # WhereNode
children.append(
cls.get_filters_from_queryset(
queryset,
node,
)
)
node_filters[u'children'] = children
return node_filters
@classmethod
def model_q_to_search_q(cls, _q):
"""Transform a `django.db.model.Q` tree to `search.ql.Q` tree.
TODO: handle negation
"""
if type(_q) is tuple:
k, v = _q
return (k, resolve_filter_value(v))
if not _q.children:
return None
q = SearchQ()
q.conn = _q.connector
q.children = filter(
lambda x: x is not None,
            map(cls.model_q_to_search_q, _q.children)
)
q.inverted = _q.negated
if not q.children:
return None
# TODO: handle negation?
return q
@classmethod
def normalize_lookup(cls, node):
"""Converts Django Lookup into a single tuple or a list of tuples if
the lookup_name is IN
Example for lookup_name IN and rhs ['<EMAIL>', '<EMAIL>']:
{
u'connector': u'OR',
u'children': [
(u'email', u'=', u'<EMAIL>'),
(u'email', u'=', u'<EMAIL>')
]
}
Example for lookup_name that's not IN (exact in this case) and value
'<EMAIL>': (u'email', u'=', u'<EMAIL>')
"""
target = unicode(node.lhs.target.name)
lookup_name = unicode(node.lookup_name)
# convert "IN" into a list of "="
if lookup_name.lower() == u'in':
return {
u'connector': u'OR',
u'children': [
(
target,
u'exact',
value,
)
for value in node.rhs
]
}
return (
target,
lookup_name,
node.rhs,
)
def _clone(self):
return self.__class__(
model=self.model,
queryset=self._queryset,
_is_none=self._is_none,
ids_only=self.ids_only
)
def _transform_filters(self, *args, **kwargs):
"""Normalize a set of filter kwargs intended for Django queryset to
those than can be used with a search queryset
Returns tuple of (args, kwargs) to pass directly to SearchQuery.filter
"""
_args = [
self.model_q_to_search_q(_q) if type(_q) is DjangoQ else _q
for _q in args
]
_kwargs = {k: resolve_filter_value(v) for k, v in kwargs.iteritems()}
return _args, _kwargs
def as_django_queryset(self):
"""Get the Django queryset for objects in this set of documents, by
looking up on pk__in.
Returns
A tuple where the first item is the pk__in queryset, and the second
is a list of the document PKs being looked up (in case the calling
function wants to re-order them into the PK order).
"""
# TODO: This could be converted to an IDs only query for efficiency if
        # `self` isn't already evaluated or isn't already an `ids_only` query
doc_pks = [doc.pk for doc in self]
return (
self.model.objects
.filter(pk__in=doc_pks)
.prefetch_related(*self._queryset._prefetch_related_lookups)
), doc_pks
def as_model_objects(self):
"""Get the IDs in the order they came back from the search API...
"""
qs, doc_pks = self.as_django_queryset()
results = list(qs)
# Since we do pk__in to get the objects from the datastore, we lose
# any ordering there was. To recreate it, we have to manually order
# the list back into whatever order the pks from the search API were in.
key_func = lambda x: doc_pks.index(unicode(x.pk))
results.sort(key=key_func)
return results
def all(self):
clone = self._clone()
clone._query = self._query._clone()
return clone
def __len__(self):
return 0 if self._is_none else self._query.__len__()
def __iter__(self):
if self._is_none:
return iter([])
else:
return self._query.__iter__()
def __getitem__(self, s):
if isinstance(s, slice):
clone = self._clone()
clone._query = self._query.__getitem__(s)
return clone
else:
return self._query.__getitem__(s)
def filter(self, *args, **kwargs):
args, kwargs = self._transform_filters(*args, **kwargs)
args = args or []
clone = self._clone()
clone._query = self._query.filter(*args, **kwargs)
return clone
def none(self):
clone = self._clone()
clone._is_none = True
clone._query = self._query._clone()
return clone
def count(self):
return 0 if self._is_none else len(self._query)
def order_by(self, *fields):
qs = self._query.order_by(*fields)
clone = self._clone()
clone._query = qs
return clone
def keywords(self, query_string):
qs = self._query.keywords(query_string)
clone = self._clone()
clone._query = qs
return clone
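# Illustrative usage sketch (not part of the original module). Assuming a Django
# model such as the `Profile` model used in the docstring examples above:
#
#     queryset = Profile.objects.filter(given_name='pete')
#     adapter = SearchQueryAdapter.from_queryset(queryset, ids_only=True)
#     adapter = adapter.filter(email='pete@example.com').order_by('given_name')
#     profiles = adapter.as_model_objects()  # model instances, in search-API order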
|
75590
|
import unittest
import pytest
import paramak
class TestExtrudeHollowRectangle(unittest.TestCase):
def setUp(self):
self.test_shape = paramak.ExtrudeHollowRectangle(
height=10, width=15, casing_thickness=1, distance=2
)
def test_default_parameters(self):
"""Checks that the default parameters of a ExtrudeHollowRectangle are
correct."""
assert self.test_shape.center_point == (0, 0)
def test_processed_points_calculation(self):
"""Checks that the processed_points used to construct the
ExtrudeHollowRectangle are calculated correctly from the parameters given."""
assert self.test_shape.processed_points == [
(7.5, 5.0, "straight"),
(7.5, -5.0, "straight"),
(-7.5, -5.0, "straight"),
(-7.5, 5.0, "straight"),
(7.5, 5.0, "straight"),
(8.5, 6.0, "straight"),
(8.5, -6.0, "straight"),
(-8.5, -6.0, "straight"),
(-8.5, 6.0, "straight"),
(8.5, 6.0, "straight"),
(7.5, 5.0, "straight"),
]
def test_points_calculation(self):
"""Checks that the points used to construct the ExtrudeHollowRectangle are
calculated correctly from the parameters given."""
print(self.test_shape.points)
assert self.test_shape.points == [
(7.5, 5.0),
(7.5, -5.0),
(-7.5, -5.0),
(-7.5, 5.0),
(7.5, 5.0),
(8.5, 6.0),
(8.5, -6.0),
(-8.5, -6.0),
(-8.5, 6.0),
(8.5, 6.0),
]
def test_creation(self):
"""Creates a rectangular extrusion using the ExtrudeHollowRectangle
parametric component and checks that a cadquery solid is created."""
assert self.test_shape.solid is not None
assert self.test_shape.volume() > 100
def test_absolute_volume(self):
"""Creates a rectangular extrusion using the ExtrudeHollowRectangle
parametric component and checks that the volume is correct"""
assert self.test_shape.volume() == pytest.approx((17 * 12 * 2) - (15 * 10 * 2))
def test_absolute_areas(self):
"""Creates a rectangular extrusion using the ExtrudeHollowRectangle
parametric component and checks that the areas are correct"""
assert len(self.test_shape.areas) == 10
assert len(set([round(i) for i in self.test_shape.areas])) == 5
assert self.test_shape.areas.count(pytest.approx(15 * 2)) == 2
assert self.test_shape.areas.count(pytest.approx(10 * 2)) == 2
|
75602
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from numpy import asarray
from scipy.spatial import Voronoi
from scipy.spatial import Delaunay
__all__ = [
'delaunay_from_points_numpy',
'voronoi_from_points_numpy',
]
def delaunay_from_points_numpy(points):
"""Computes the delaunay triangulation for a list of points using Numpy.
Parameters
----------
    points : sequence of tuple
        XYZ coordinates of the original points.
Returns
-------
list
The faces of the triangulation.
Each face is a triplet of indices referring to the list of point coordinates.
Examples
--------
>>>
"""
xyz = asarray(points)
d = Delaunay(xyz[:, 0:2])
return d.simplices
def voronoi_from_points_numpy(points):
"""Generate a voronoi diagram from a set of points.
Parameters
----------
points : list of list of float
XYZ coordinates of the voronoi sites.
Returns
    -------
    scipy.spatial.Voronoi
        The Voronoi diagram object computed by SciPy.
Examples
--------
>>>
"""
points = asarray(points)
voronoi = Voronoi(points)
return voronoi
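
# Minimal usage sketch (hypothetical point set; XY coordinates only, which is
# enough because the triangulation uses just the first two columns):
if __name__ == "__main__":
    pts = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.5, 0.5]]
    faces = delaunay_from_points_numpy(pts)    # (n, 3) array of vertex indices
    diagram = voronoi_from_points_numpy(pts)   # scipy.spatial.Voronoi object
    print(faces)
    print(diagram.vertices)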
|
75610
|
import sys
from ..api import plot
import fuc
import pysam
description = f"""
Plot allele fraction profile from VcfFrame[Imported].
"""
def create_parser(subparsers):
parser = fuc.api.common._add_parser(
subparsers,
fuc.api.common._script_name(),
description=description,
help='Plot allele fraction profile with VCF data.',
)
parser.add_argument(
'imported_variants',
metavar='imported-variants',
help='Archive file with the semantic type \n'
'VcfFrame[Imported].'
)
parser.add_argument(
'--path',
metavar='PATH',
help='Create plots in this directory.'
)
parser.add_argument(
'--samples',
metavar='TEXT',
nargs='+',
help='Create plots only for these samples.'
)
parser.add_argument(
'--fontsize',
metavar='FLOAT',
type=float,
default=25,
help='Text fontsize (default: 25).'
)
def main(args):
plot.plot_vcf_allele_fraction(
args.imported_variants, path=args.path, samples=args.samples,
fontsize=args.fontsize
)
|
75616
|
f = plt.figure(figsize=(6,6))
plt.scatter(pres.swing_full, lp.weights.lag_spatial(w, pres.swing_full))
plt.plot((-.3,.1),(-.3,.1), color='k')
plt.title(r'$I = {:.3f} \ \ (p < {:.3f})$'.format(moran.I, moran.p_sim))
|
75619
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import pprint
import pymongo
from pymongo import MongoClient

pp = pprint.PrettyPrinter()
"""
Created on Wed Oct 12 20:30:54 2016
@author: ryanlim, jpitts
Requirements:
- pymongo needs to be installed
- mongodb needs to be running
- brigade-matchmaker web needs to be running
(populates the users table in the brigade-matchmaker collection)
For installation instructions:
http://api.mongodb.com/python/current/installation.html
For the API:
http://api.mongodb.com/python/current/api/pymongo/collection.html
Install dependencies:
python -m pip install pymongo
Interface:
param skills_list: Skills that the user can contribute.
param skills_offered_list: Skills the the user wants to learn.
param interests_list: Interests of the user.
param goals_list: Project-related goals of the user.
@usage:
python ./db-match-algo.py client-dev/javascript,data-sci/python null housing developer,presenter
python ./db-match-algo.py data-sci null homelessness developer
python ./db-match-algo.py ruby null null developer,learner
python ./db-match-algo.py null null null leader
python ./db-match-algo.py null client-dev/javascript null null
"""
# database configuration
collection_name = 'brigade-matchmaker'
client = MongoClient('localhost', 27017)
db = client[collection_name]
# load the taxonomies and attributes from the database
""" NOTE: taxn_attributes below describes the data structure used in
breaking down the hierarchy of attributes submitted by users
taxn_attributes = {
'skills': [
{name: 'server-dev', parent: 'skills', synonyms: ['back-end']},
{name: 'nodejs', parent: 'server-dev', synonyms: ['node']},
{name: 'python', parent: 'server-dev', synonyms: ['django']},
],
'interests': [
],
'goals': [
]
}
"""
taxonomies = []
taxn_attributes = {}
taxn_name = ""
for attribute in db.projecttaxonomies.find({}):
#pp.pprint(need)
if attribute['parent'] == None:
#print "taxonomy=" + attribute['name']
taxonomies.append(attribute)
taxn_name=attribute['name']
taxn_attributes[taxn_name] = []
else:
#print " attribute=" + attribute['name']
taxn_attributes[taxn_name].append(attribute)
""" NOTE: projects_list below is for understanding the data structure
used in the algo (soon to reflect what is in the database)
projects_list = [
{
'id':'ux-research',
'name':'UX Research',
'interests':['all','community-organizer'],
'skills_needed':['python','javascript','html'],
'goals_needed':['developer','helper']
},
{
'id':'data-sciences',
'name':'Data Sciences',
'interests':['all'],
'skills_needed':['python'],
'goals_needed':['developer']
}
]
"""
# load the projects list from the database
projects_list = []
projects_count = 0
for project in db.projects.find({}):
#print 'load ' + project['name']
#print project['_id']
#pp.pprint(project['matchingConfig'])
#pp.pprint(project['todoItems'])
# NOTE: the algo will add the following data to project:
# project['interests'] = []
# project['interests_total'] = 0
# project['interests_matched'] = []
# project['skills_offered'] = []
# project['skills_offered_categories'] = []
# project['skills_offered_total'] = 0
# project['skills_offered_matched'] = []
# project['skills_needed'] = []
# project['skills_needed_categories'] = []
# project['skills_total'] = 0
# project['skills_matched'] = []
# project['goals_needed'] = []
# project['goals_total'] = 0
# project['goals_matched'] = []
# interests
project['interests'] = []
for need in project['matchingConfig']['interests']:
project['interests'].append(need)
# skills offered
project['skills_offered'] = []
project['skills_offered_categories'] = []
for offering in project['matchingConfig']['skillsOffered']:
project['skills_offered'].append(offering)
if "/" in offering:
#print('category: ' + offering.split("/")[0])
project['skills_offered_categories'].append(offering.split("/")[0])
# skills needed
project['skills_needed'] = []
project['skills_needed_categories'] = []
for need in project['matchingConfig']['skillsNeeded']:
project['skills_needed'].append(need)
if "/" in need:
#print('category: ' + offering.split("/")[0])
project['skills_needed_categories'].append(need.split("/")[0])
# goals
project['goals_needed'] = []
for need in project['matchingConfig']['goalsNeeded']:
project['goals_needed'].append(need)
projects_list.append(project)
projects_count += 1
# END loading projects list
def matchmaking (
skills_list, # targeting skills needed by project
skills_offered_list, # targeting skills offered by project
interests_list, # targeting shared interests
goals_list # targeting goals needed by project
):
"""
print 'matchmaking()'
print 'skills='
pp.pprint(skills_list)
print 'skills_offered='
pp.pprint(skills_offered_list)
print 'interests='
pp.pprint(interests_list)
print 'goals='
pp.pprint(goals_list)
"""
#iterate over the projects
for project in projects_list:
# factors to prioritize skills
skills_factor = 2
skills_offered_factor = 2
interests_factor = 1
goals_factor = 1
project['user_score'] = 0
# in this project hold the totals for this user's
# skills, interests, and goals
project['skills_total'] = 0
project['skills_matched'] = []
project['skills_offered_total'] = 0
project['skills_offered_matched'] = []
project['interests_total'] = 0
project['interests_matched'] = []
project['goals_total'] = 0
project['goals_matched'] = []
'''
iterate over the skills_list and get the corresponding
values for each skill and the total value from the project
'''
if len(skills_list) > 0:
for skill in skills_list:
if skill in project['skills_needed']:
project['skills_total'] += 1
project['skills_matched'].append(skill)
                # category match: a bare category that matches a needed skill's category also increases the score
if "/" not in skill and skill in project['skills_needed_categories']:
#print 'skill needed = ' + skill
#pp.pprint(project['skills_needed_categories'])
project['skills_total'] += 1
project['skills_matched'].append(skill)
# NOTE: initial work on category-related scoring
#elif "/" in skill and skill.split("/")[0] in project['skills_needed_categories']:
# project['skills_total'] += 1
'''
iterate over the skills_offered_list and get the corresponding
values for each skill offered and the total value from the project
'''
if len(skills_offered_list) > 0:
for offering in skills_offered_list:
if offering in project['skills_offered']:
project['skills_offered_total'] += 1
project['skills_offered_matched'].append(offering)
# category match: category match to increase total score
if "/" not in offering and offering in project['skills_offered_categories']:
#print 'skill offered ' + offering
#pp.pprint(project['skills_offered_categories'])
project['skills_offered_total'] += 1
                    project['skills_offered_matched'].append(offering)
# NOTE: initial work on category-related scoring
#elif "/" in offering and offering.split("/")[0] in project['skills_needed_categories']:
# project['skills_total'] += 1
'''
iterate over the interests_list and get the corresponding
values for each interest and the total value from the project
'''
if len(interests_list) > 0:
for interest in interests_list:
if interest in project['interests']:
project['interests_total'] += 1
project['interests_matched'].append(interest)
'''
iterate over the goals_list and get the corresponding
values for each goal and the total value from the project
'''
if len(goals_list) > 0:
for goal in goals_list:
if goal in project['goals_needed']:
project['goals_total'] += 1
project['goals_matched'].append(goal)
#Find the weighted total for the project
project_total = 0
project_total += (skills_factor * project['skills_total'])
project_total += (skills_offered_factor * project['skills_offered_total'])
project_total += (interests_factor * project['interests_total'])
project_total += (goals_factor * project['goals_total'])
#add the weighted total to the project_scores list
project['user_score'] = project_total
"""
print
print 'User match w/ ' + project['name']
print ' skills ' + str(project['skills_total'])
pp.pprint(project['skills_matched'])
print ' skills offered ' + str(project['skills_offered_total'])
pp.pprint(project['skills_offered_matched'])
print ' interests ' + str(project['interests_total'])
print ' goals ' + str(project['goals_total'])
print ' total score = ' + str(project_total)
"""
#create dictionary for project - key and project_score - value and set up values
project_dict = {}
#sorted_projects = sorted(project_list, key=lambda k: k['user_score'])
from operator import itemgetter
sorted_projects = sorted(projects_list, key=itemgetter('user_score'), reverse=True)
outputln = ""
for project in sorted_projects:
seq = (
str(project['_id']),
project['name'],
str(project['user_score']),
'skills',
str(project['skills_total']),
"(" + " ".join(project['skills_matched']) + ")",
'skillsOffered',
str(project['skills_offered_total']),
"(" + " ".join(project['skills_offered_matched']) + ")",
'interests',
str(project['interests_total']),
"(" + " ".join(project['interests_matched']) + ")",
'goals',
str(project['goals_total']),
"(" + " ".join(project['goals_matched']) + ")",
)
#pp.pprint(seq)
print(",".join(seq))
# if called from command line
if __name__ == "__main__":
skills = sys.argv[1] if (len(sys.argv) > 1 and sys.argv[1] != 'null') else ""
skills_list = skills.split(",")
skills_offered = sys.argv[2] if (len(sys.argv) > 2 and sys.argv[2] != 'null') else ""
skills_offered_list = skills_offered.split(",")
interests = sys.argv[3] if (len(sys.argv) > 3 and sys.argv[3] != 'null') else ""
interests_list = interests.split(",")
goals = sys.argv[4] if (len(sys.argv) > 4 and sys.argv[4] != 'null') else ""
goals_list = goals.split(",")
matchmaking (skills_list, skills_offered_list, interests_list, goals_list)
|
75628
|
from simple_playgrounds.playground.playgrounds import *
from simple_playgrounds.engine import Engine
from simple_playgrounds.agent.controllers import Keyboard
from simple_playgrounds.agent.agents import HeadAgent
import time
import cv2
my_agent = HeadAgent(controller=Keyboard(), lateral=True, interactive=True)
#################################
for playground_name, pg_class in PlaygroundRegister.playgrounds['demo'].items():
pg = pg_class()
pg.add_agent(my_agent, allow_overlapping=False)
engine = Engine(playground=pg, debug=False)
while engine.game_on:
actions = {my_agent: my_agent.controller.generate_actions()}
engine.step(actions)
cv2.imshow(
'playground',
engine.generate_playground_image()[:, :, ::-1])
cv2.waitKey(1)
if my_agent.reward != 0:
print(my_agent.reward)
time.sleep(0.05)
pg.remove_agent(my_agent)
|
75636
|
from .nlm3 import nlm3
from .nlm2 import nlm2
from .bilateral2 import bilateral2
from .bilateral3 import bilateral3
|
75737
|
import logging
import re
import shlex
import threading
import sshim
logging.basicConfig(level='DEBUG')
logger = logging.getLogger()
class Device(threading.Thread):
def __init__(self, script):
threading.Thread.__init__(self)
self.history = []
self.script = script
self.start()
def adduser(self, *args):
username = args[0]
self.script.writeline('user %s add!' % username)
commands = {'adduser': adduser}
def cursor(self, key):
if key == 'A':
self.script.writeline('up')
def prompt(self):
self.script.write('root@device # ')
def run(self):
while True:
self.prompt()
            match = self.script.expect(re.compile(r'(?P<command>\S+)?\s*(?P<arguments>.*)'))
self.history.append(match.group(0))
groups = match.groupdict()
if groups['command'] == 'exit':
break
if groups['command'] in Device.commands:
Device.commands[groups['command']](self, *shlex.split(groups['arguments']))
else:
self.script.writeline('-bash: %s: command not found' % groups['command'])
server = sshim.Server(Device, port=3000)
try:
server.run()
except KeyboardInterrupt:
server.stop()
|
75766
|
import numpy as np
import nanonet.tb as tb
from test.test_hamiltonian_module import expected_bulk_silicon_band_structure
def test_simple_atomic_chain():
""" """
site_energy = -1.0
coupling = -1.0
l_const = 1.0
a = tb.Orbitals('A')
a.add_orbital(title='s', energy=-1, )
xyz_file = """1
H cell
A 0.0000000000 0.0000000000 0.0000000000
"""
tb.set_tb_params(PARAMS_A_A={'ss_sigma': -1.0})
h = tb.HamiltonianSp(xyz=xyz_file, nn_distance=1.1)
h.initialize()
PRIMITIVE_CELL = [[0, 0, l_const]]
h.set_periodic_bc(PRIMITIVE_CELL)
num_points = 10
kk = np.linspace(0, 3.14 / l_const, num_points, endpoint=True)
band_structure = []
for jj in range(num_points):
vals, _ = h.diagonalize_periodic_bc([0.0, 0.0, kk[jj]])
band_structure.append(vals)
band_structure = np.array(band_structure)
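    # analytic tight-binding dispersion of a monoatomic chain with a single
    # s orbital: E(k) = eps_s + 2 * t * cos(k * a)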
desired_value = site_energy + 2 * coupling * np.cos(l_const * kk)
np.testing.assert_allclose(band_structure, desired_value[:, np.newaxis], atol=1e-9)
def test_atomic_chain_two_kinds_of_atoms():
""" """
site_energy1 = -1.0
site_energy2 = -2.0
coupling = -1.0
l_const = 2.0
a = tb.Orbitals('A')
a.add_orbital(title='s', energy=site_energy1, )
b = tb.Orbitals('B')
b.add_orbital(title='s', energy=site_energy2, )
xyz_file = """2
H cell
A 0.0000000000 0.0000000000 0.0000000000
B 0.0000000000 0.0000000000 1.0000000000
"""
tb.set_tb_params(PARAMS_A_B={'ss_sigma': coupling})
h = tb.HamiltonianSp(xyz=xyz_file, nn_distance=1.1)
h.initialize()
PRIMITIVE_CELL = [[0, 0, l_const]]
h.set_periodic_bc(PRIMITIVE_CELL)
num_points = 10
kk = np.linspace(0, 3.14 / 2, num_points, endpoint=True)
band_structure = []
for jj in range(num_points):
vals, _ = h.diagonalize_periodic_bc([0.0, 0.0, kk[jj]])
band_structure.append(vals)
band_structure = np.array(band_structure)
desired_value = np.zeros(band_structure.shape)
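    # the two bands are the eigenvalues of the 2x2 Bloch Hamiltonian of the
    # diatomic chain, i.e. the roots of
    # E**2 - (e1 + e2)*E + (e1*e2 - (2*t*cos(k*a/2))**2) = 0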
b = site_energy1 + site_energy2
c = site_energy1 * site_energy2 - (2.0 * coupling * np.cos(0.5 * kk * l_const)) ** 2
desired_value[:, 0] = 0.5 * (b - np.sqrt(b ** 2 - 4.0 * c))
desired_value[:, 1] = 0.5 * (b + np.sqrt(b ** 2 - 4.0 * c))
np.testing.assert_allclose(band_structure, desired_value, atol=1e-9)
def test_bulk_silicon():
""" """
a_si = 5.50
PRIMITIVE_CELL = [[0, 0.5 * a_si, 0.5 * a_si],
[0.5 * a_si, 0, 0.5 * a_si],
[0.5 * a_si, 0.5 * a_si, 0]]
tb.Orbitals.orbital_sets = {'Si': 'SiliconSP3D5S'}
xyz_file = """2
Si2 cell
Si1 0.0000000000 0.0000000000 0.0000000000
Si2 1.3750000000 1.3750000000 1.3750000000
"""
h = tb.HamiltonianSp(xyz=xyz_file, nn_distance=2.5)
h.initialize()
h.set_periodic_bc(PRIMITIVE_CELL)
sym_points = ['L', 'GAMMA', 'X']
num_points = [10, 25]
k = tb.get_k_coords(sym_points, num_points, 'Si')
    band_structure = []
vals = np.zeros((sum(num_points), h.num_eigs), dtype=complex)
for jj, item in enumerate(k):
vals[jj, :], _ = h.diagonalize_periodic_bc(item)
band_structure = np.real(np.array(vals))
np.testing.assert_allclose(band_structure, expected_bulk_silicon_band_structure()[:,:h.num_eigs], atol=1e-4)
if __name__ == '__main__':
# test_simple_atomic_chain()
# test_atomic_chain_two_kinds_of_atoms()
test_bulk_silicon()
|
75788
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import json
import os
import re
import mock
import requests_mock
from six.moves.urllib import parse as urlparse
import testtools
# Setup dummy environment variables so 'hook' can be imported
os.environ['CF_EMAIL'] = "<EMAIL>"
os.environ['CF_KEY'] = "a_cloudflare_example_key"
import hook # noqa
CF_API_HOST = "api.cloudflare.com"
CF_API_PATH = "/client/v4"
CF_API_SCHEME = "https"
class TestBase(testtools.TestCase):
def setUp(self):
super(TestBase, self).setUp()
self.expected_headers = {
'Content-Type': 'application/json',
            'X-Auth-Email': "<EMAIL>",
'X-Auth-Key': 'a_cloudflare_example_key',
}
ExpectedRequestsData = collections.namedtuple(
'ExpectedRequestsData', ['method', 'path', 'query', 'json_body'])
@requests_mock.Mocker()
class TestRequestCallers(TestBase):
def setUp(self):
super(TestRequestCallers, self).setUp()
self.matcher = re.compile(r'^https://api.cloudflare.com/client/v4/')
def _validate_requests_calls(self, mock_request, expected_data_list):
"""Helper function to check values of calls to requests"""
# Make sure our call count matches up with what we expect
self.assertEqual(len(expected_data_list), mock_request.call_count)
for index, expected_data in enumerate(expected_data_list):
# Provide a bit more info if a test fails
expected_str = "Info: {}".format(expected_data)
request_obj = mock_request.request_history[index]
parsed_url = urlparse.urlparse(request_obj.url)
self.assertEqual(expected_data.method.upper(),
request_obj.method)
self.assertEqual(CF_API_SCHEME, parsed_url.scheme)
self.assertEqual(CF_API_HOST, parsed_url.netloc)
self.assertEqual(
"{}/{}".format(CF_API_PATH, expected_data.path),
parsed_url.path)
self.assertEqual(expected_data.query, request_obj.qs,
expected_str)
if expected_data.json_body is not None:
self.assertEqual(expected_data.json_body,
json.loads(request_obj._request.body),
expected_str)
def test__get_zone_id(self, mock_request):
expected_list = [
ExpectedRequestsData(
method='get',
path="zones",
query={'name': ['example.com']},
json_body=None,
),
]
mock_request.get(self.matcher, text=ZONE_RESPONSE)
auth, result = hook._get_zone_id("example.com")
expected_id = "023e105f4ecef8ad9ca31a8372d0c353"
self.assertEqual(expected_id, result)
self._validate_requests_calls(mock_request=mock_request,
expected_data_list=expected_list)
def test__get_txt_record_id_found(self, mock_request):
expected_list = [
ExpectedRequestsData(
method='get',
path='zones/ZONE_ID/dns_records',
query={'content': ['token'], 'name': ['example.com'],
'type': ['txt']},
json_body=None,
),
]
mock_request.get(self.matcher, text=DNS_RECORDS_RESPONSE)
result = hook._get_txt_record_id({}, "ZONE_ID", "example.com", "TOKEN")
expected_id = "372e67954025e0ba6aaa6d586b9e0b59"
self.assertEqual(expected_id, result)
self._validate_requests_calls(mock_request=mock_request,
expected_data_list=expected_list)
def test__get_txt_record_id_not_found(self, mock_request):
expected_list = [
ExpectedRequestsData(
method='get',
path="zones/ZONE_ID/dns_records",
query={'content': ['token'], 'name': ['example.com'],
'type': ['txt']},
json_body=None,
),
]
mock_request.get(self.matcher, text=DNS_RECORDS_RESPONSE_NOT_FOUND)
result = hook._get_txt_record_id({}, "ZONE_ID", "example.com", "TOKEN")
self.assertEqual(None, result)
self._validate_requests_calls(mock_request=mock_request,
expected_data_list=expected_list)
    @mock.patch.object(hook, '_get_txt_record_id',
                       lambda auth, zone_id, name, token: None)
def test_create_txt_record(self, mock_request):
expected_list = [
ExpectedRequestsData(
method='get',
path="zones",
query={'name': ['example.com']},
json_body=None,
),
ExpectedRequestsData(
method='post',
path=("zones/023e105f4ecef8ad9ca31a8372d0c353/"
"dns_records"),
query={},
json_body={'content': 'TOKEN', 'type': 'TXT', 'ttl': 120,
'name': '_acme-challenge.example.com',
},
)
]
mock_request.get(self.matcher, text=ZONE_RESPONSE)
mock_request.post(self.matcher, text=CREATE_DNS_RECORD_RESPONSE)
args = ['example.com', 'CHALLENGE', 'TOKEN']
result = hook.create_txt_record(args)
self._validate_requests_calls(mock_request=mock_request,
expected_data_list=expected_list)
self.assertEqual(None, result)
# Sample responses
ZONE_RESPONSE = """
{
"success": true,
"errors": [
{}
],
"messages": [
{}
],
"result": [
{
"id": "023e105f4ecef8ad9ca31a8372d0c353",
"name": "example.com",
"development_mode": 7200,
"original_name_servers": [
"ns1.originaldnshost.com",
"ns2.originaldnshost.com"
],
"original_registrar": "GoDaddy",
"original_dnshost": "NameCheap",
"created_on": "2014-01-01T05:20:00.12345Z",
"modified_on": "2014-01-01T05:20:00.12345Z",
"owner": {
"id": "<PASSWORD>",
"email": "<EMAIL>",
"owner_type": "user"
},
"permissions": [
"#zone:read",
"#zone:edit"
],
"plan": {
"id": "e592fd9519420ba7405e1307bff33214",
"name": "Pro Plan",
"price": 20,
"currency": "USD",
"frequency": "monthly",
"legacy_id": "pro",
"is_subscribed": true,
"can_subscribe": true
},
"plan_pending": {
"id": "e592fd9519420ba7405e1307bff33214",
"name": "Pro Plan",
"price": 20,
"currency": "USD",
"frequency": "monthly",
"legacy_id": "pro",
"is_subscribed": true,
"can_subscribe": true
},
"status": "active",
"paused": false,
"type": "full",
"name_servers": [
"tony.ns.cloudflare.com",
"woz.ns.cloudflare.<EMAIL>"
]
}
],
"result_info": {
"page": 1,
"per_page": 20,
"count": 1,
"total_count": 2000
}
}
"""
DNS_RECORDS_RESPONSE = """
{
"success": true,
"errors": [],
"messages": [],
"result": [
{
"id": "372e67954025e0ba6aaa6d586b9e0b59",
"type": "TXT",
"name": "_acme-challenge.test.example.com",
"content": "WyIlYaKOp62zaDu_JDKwfXVCnr4q4ntYtmkZ3y5BF2w",
"proxiable": false,
"proxied": false,
"ttl": 120,
"locked": false,
"zone_id": "023e105f4ecef8ad9ca31a8372d0c353",
"zone_name": "example.com",
"created_on": "2014-01-01T05:20:00.12345Z",
"modified_on": "2014-01-01T05:20:00.12345Z",
"data": {}
}
],
"result_info": {
"page": 1,
"per_page": 20,
"count": 1,
"total_count": 2000
}
}
"""
DNS_RECORDS_RESPONSE_NOT_FOUND = """
{
"success": true,
"errors": [],
"messages": [],
"result": [],
"result_info": {
"page": 1,
"per_page": 20,
"count": 1,
"total_count": 2000
}
}
"""
CREATE_DNS_RECORD_RESPONSE = """
{
"success": true,
"errors": [
{}
],
"messages": [
{}
],
"result": {
"id": "372e67954025e0ba6aaa6d586b9e0b59",
"type": "A",
"name": "example.com",
"content": "1.2.3.4",
"proxiable": true,
"proxied": false,
"ttl": 120,
"locked": false,
"zone_id": "023e105f4ecef8ad9ca31a8372d0c353",
"zone_name": "example.com",
"created_on": "2014-01-01T05:20:00.12345Z",
"modified_on": "2014-01-01T05:20:00.12345Z",
"data": {}
}
}
"""
|
75811
|
import functools
import numpy as np
import torch as t
import torch.nn as nn
import torch.distributed as dist
from jukebox.transformer.ops import Conv1D, ACT_FNS, LayerNorm
from jukebox.transformer.factored_attention import FactoredAttention
from jukebox.utils.checkpoint import checkpoint
def _convert_mlp_traced(l):
if isinstance(l, ResAttnBlock):
l.mlp = t.jit.trace(l.mlp, t.randn(1, 1, l.n_in).cuda())
def _convert_mlp_traced_fp16(l):
if isinstance(l, ResAttnBlock):
l.mlp = t.jit.trace(l.mlp, t.randn(1, 1, l.n_in).cuda().half())
class MLP(nn.Module):
def __init__(self, n_in, n_state, resid_dropout=0.0, afn='quick_gelu', zero_out=False, init_scale=1.0):
super().__init__()
self.c_fc = Conv1D(n_in, n_state, init_scale=init_scale)
self.c_proj = Conv1D(n_state, n_in, zero_out, init_scale=init_scale)
self.act = ACT_FNS[afn]
self.resid_dropout = nn.Dropout(resid_dropout) if resid_dropout > 0.0 else lambda x: x
def forward(self, x):
m = self.act(self.c_fc(x))
m = self.c_proj(m)
return self.resid_dropout(m)
class ResAttnBlock(nn.Module):
def __init__(self, n_in, n_ctx, n_head,
attn_dropout=0.0, resid_dropout=0.0,
afn='quick_gelu', scale=True, mask=False,
zero_out=False, init_scale=1.0, res_scale=1.0,
m_attn = 0.25, m_mlp = 1.,
checkpoint_attn = 0, checkpoint_mlp = 0,
attn_func=0, blocks=None, spread=None,
encoder_dims=None, prime_len=None):
super().__init__()
self.attn = FactoredAttention(n_in=n_in, n_ctx=n_ctx, n_state=int(m_attn * n_in), n_head=n_head,
attn_dropout=attn_dropout, resid_dropout=resid_dropout,
scale=scale, mask=mask,
zero_out=zero_out, init_scale=init_scale,
checkpoint_attn=checkpoint_attn,
attn_func=attn_func, blocks=blocks, spread=spread,
encoder_dims=encoder_dims, prime_len=prime_len)
self.ln_0 = LayerNorm(n_in)
self.mlp = MLP(n_in=n_in, n_state=int(m_mlp * n_in),
resid_dropout=resid_dropout,
afn=afn,
zero_out=zero_out, init_scale=init_scale)
self.ln_1 = LayerNorm(n_in)
self.res_scale = res_scale
self.checkpoint_attn = checkpoint_attn
self.checkpoint_mlp = checkpoint_mlp
self.n_in = n_in
self.attn_func = attn_func
def forward(self, x, encoder_kv, sample=False):
if sample:
a = self.attn(self.ln_0(x), encoder_kv, sample)
m = self.mlp(self.ln_1(x + a))
else:
if self.attn_func == 6:
assert encoder_kv is not None
a = checkpoint(lambda _x,_enc_kv,_s=sample: self.attn(self.ln_0(_x),_enc_kv,_s),
(x,encoder_kv),
(*self.attn.parameters(), *self.ln_0.parameters()),
self.checkpoint_attn == 3) # 2 recomputes after the projections, and 1 recomputes after head splitting.
else:
assert encoder_kv is None
a = checkpoint(lambda _x,_enc_kv=None,_s=sample: self.attn(self.ln_0(_x),_enc_kv,_s),
(x,),
(*self.attn.parameters(), *self.ln_0.parameters()),
self.checkpoint_attn == 3) # 2 recomputes after the projections, and 1 recomputes after head splitting.
m = checkpoint(lambda _x: self.mlp(self.ln_1(_x)), (x + a,),
(*self.mlp.parameters(), *self.ln_1.parameters()),
self.checkpoint_mlp == 1)
if self.res_scale == 1.0:
h = x + a + m
else:
h = x + self.res_scale * (a + m)
return h
class Transformer(nn.Module):
def __init__(self, n_in, n_ctx, n_head, n_depth,
attn_dropout=0.0, resid_dropout=0.0,
afn='quick_gelu', scale=True, mask=False,
zero_out=False, init_scale=1.0, res_scale=False,
m_attn=0.25, m_mlp=1.,
checkpoint_attn=0, checkpoint_mlp=0, checkpoint_res=0,
attn_order=0, blocks=None, spread=None,
encoder_dims=None, prime_len=None):
super().__init__()
self.n_in = n_in
self.n_ctx = n_ctx
self.encoder_dims = encoder_dims
self.blocks = blocks
if blocks is not None:
assert n_ctx % blocks == 0
self.block_ctx = n_ctx // blocks
self.prime_len = prime_len
self.n_head = n_head
res_scale = 1.0 / n_depth if res_scale else 1.0
# Orders of attn_func
attn_func = {0: lambda d: 0, # Complete dense attn
1: lambda d: [1,2][d%2], # Alternate row and column attn
2: lambda d: [1,2,3][d % 3], # Alternate row, column and previous row attn
3: lambda d: [1,4][d % 2], # Alternate row and last column
4: lambda d: [1,5][d % 2], # Alternate row and last k columns
5: lambda d: [1,4,1,1][d % 4], # Alternate row, last column, row, row
6: lambda d: [1,2,3,6][d % 4],
7: lambda d: [*[1,2,3]*5,6][d%16],
8: lambda d: [1,2,3,1,2,3,1,2,3,6][d%10],
9: lambda d: [1,2,3,0][d % 4],
10: lambda d: [*[1,2,3,1,2,3,1,2,3],*[1,2,3,1,2,3,1,2,3,6]*7][d%79],
11: lambda d: [6,6,0][d%3] if d%16 == 15 else [1,2,3][d%3],
12: lambda d: [7,7,0][d%3] if d%16 == 15 else [1,2,3][d%3],
}[attn_order]
attn_cycle = {0:1, 1:2, 2:3, 3:2, 4:2, 5:4, 6:4, 7:16, 8:10, 9:4, 10:79, 11:16, 12:16}[attn_order]
#assert n_depth % attn_cycle == 0, f'Depth {n_depth} not a multiple of cycle {attn_cycle} for attn_order {attn_order}'
attn_block = lambda d: ResAttnBlock(n_in=n_in, n_ctx=n_ctx, n_head=n_head,
attn_dropout=attn_dropout, resid_dropout=resid_dropout,
afn=afn, scale=scale, mask=mask,
zero_out=zero_out if attn_func(d) !=6 else True,
init_scale=init_scale, res_scale=res_scale,
m_attn=m_attn, m_mlp=m_mlp,
checkpoint_attn=checkpoint_attn, checkpoint_mlp=checkpoint_mlp,
attn_func=attn_func(d), blocks=blocks, spread=spread,
encoder_dims=encoder_dims, prime_len=prime_len)
self.checkpoint_res = checkpoint_res
self._attn_mods = nn.ModuleList()
for d in range(n_depth):
self._attn_mods.append(attn_block(d))
self.ws = []
def set_record_attn(self, record_attn):
"""
Arguments:
record_attn (bool or set): Makes forward prop dump self-attention
softmaxes to self.ws. Either a set of layer indices indicating
which layers to store, or a boolean value indicating whether to
dump all.
"""
def _should_record_attn(layer_idx):
if isinstance(record_attn, bool):
return record_attn
return layer_idx in record_attn
for i, l in enumerate(self._attn_mods):
l.attn.record_attn = _should_record_attn(i)
if record_attn:
assert self.ws == []
for l in self._attn_mods:
assert l.attn.w == None
else:
self.ws = []
for l in self._attn_mods:
l.attn.w = None
def forward(self, x, encoder_kv=None, sample=False, fp16=False, fp16_out=False):
if fp16:
x = x.half()
# Blocks
for i,l in enumerate(self._attn_mods):
if self.checkpoint_res == 1 and not sample:
if l.attn_func == 6:
assert encoder_kv is not None
f = functools.partial(l, sample=sample)
x = checkpoint(f, (x, encoder_kv), l.parameters(), True)
else:
f = functools.partial(l, encoder_kv=None, sample=sample)
x = checkpoint(f, (x,), l.parameters(), True)
else:
if l.attn_func == 6:
x = l(x, encoder_kv=encoder_kv, sample=sample)
else:
x = l(x, encoder_kv=None, sample=sample)
if l.attn.record_attn:
self.ws.append(l.attn.w)
if not fp16_out:
x = x.float()
return x
def check_cache(self, n_samples, sample_t, fp16):
for l in self._attn_mods:
l.attn.check_cache(n_samples, sample_t, fp16)
def del_cache(self):
for l in self._attn_mods:
l.attn.del_cache()
def check_sample(self):
bs, l, s, d = (4, self.n_ctx, self.encoder_dims, self.n_in)
prime = 5
with t.no_grad():
encoder_kv = t.randn(bs, s, d).cuda()
x = t.randn(bs, l, d).cuda()
y_forw = self.forward(x, encoder_kv=encoder_kv, sample=True)
self.del_cache()
x_chunks = t.chunk(x, 4, dim=1)
y_chunks = []
n = 0
for x_chunk in x_chunks:
self.check_cache(bs, n, False)
y_chunk = self.forward(x_chunk, encoder_kv=encoder_kv, sample=True)
y_chunks.append(y_chunk)
n += x_chunk.shape[1]
self.check_cache(bs, n, False)
y_forw_in_chunks = t.cat(y_chunks, dim=1)
max_err = t.max(t.abs(y_forw - y_forw_in_chunks))
assert max_err <= 1e-6, f"Max err is {max_err} {[i for i in range(l) if t.max(t.abs(y_forw - y_forw_in_chunks)[:, i, :]) > 1e-6]}"
if __name__ == '__main__':
from jukebox.utils.dist_utils import setup_dist_from_mpi
setup_dist_from_mpi(port=29600)
n_in = 16
n_ctx = 192
n_head = 4
n_depth = 12
blocks = 16
for attn_order in [0,2,6]:
encoder_dims = {0: 0, 2: 0, 6: 64}[attn_order]
prior = Transformer(n_in, n_ctx, n_head, n_depth, mask=True, attn_order=attn_order, encoder_dims=encoder_dims, blocks=blocks).cuda()
prior.training = False
prior.check_sample()
print(f"Checked attn_order: {attn_order}")
|
75831
|
from __future__ import absolute_import, division, print_function
from .multi_pose import MultiPoseTrainer
train_factory = {
'multi_pose': MultiPoseTrainer,
}
|
75859
|
from dassl.utils import Registry, check_availability
EVALUATOR_REGISTRY = Registry("EVALUATOR")
def build_evaluator(cfg, **kwargs):
avai_evaluators = EVALUATOR_REGISTRY.registered_names()
check_availability(cfg.TEST.EVALUATOR, avai_evaluators)
if cfg.VERBOSE:
print("Loading evaluator: {}".format(cfg.TEST.EVALUATOR))
return EVALUATOR_REGISTRY.get(cfg.TEST.EVALUATOR)(cfg, **kwargs)
|
75861
|
from hybrid_astar_planner.HybridAStar.hybrid_astar_wrapper \
import apply_hybrid_astar
import numpy as np
from pylot.planning.planner import Planner
class HybridAStarPlanner(Planner):
"""Wrapper around the Hybrid A* planner.
Note:
Details can be found at `Hybrid A* Planner`_.
Args:
world: (:py:class:`~pylot.planning.world.World`): A reference to the
planning world.
flags (absl.flags): Object to be used to access absl flags.
.. _Hybrid A* Planner:
https://github.com/erdos-project/hybrid_astar_planner
"""
def __init__(self, world, flags, logger):
super().__init__(world, flags, logger)
self._hyperparameters = {
"step_size": flags.step_size_hybrid_astar,
"max_iterations": flags.max_iterations_hybrid_astar,
"completion_threshold": flags.completion_threshold,
"angle_completion_threshold": flags.angle_completion_threshold,
"rad_step": flags.rad_step,
"rad_upper_range": flags.rad_upper_range,
"rad_lower_range": flags.rad_lower_range,
"obstacle_clearance": flags.obstacle_clearance_hybrid_astar,
"lane_width": flags.lane_width_hybrid_astar,
"radius": flags.radius,
"car_length": flags.car_length,
"car_width": flags.car_width,
}
def run(self, timestamp, ttd=None):
"""Runs the planner.
Note:
The planner assumes that the world is up-to-date.
Returns:
:py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the
planned trajectory.
"""
obstacle_list = self._world.get_obstacle_list()
if len(obstacle_list) == 0:
# Do not use Hybrid A* if there are no obstacles.
output_wps = self._world.follow_waypoints(self._flags.target_speed)
else:
# Hybrid a* does not take into account the driveable region.
# It constructs search space as a top down, minimum bounding
# rectangle with padding in each dimension.
self._logger.debug("@{}: Hyperparameters: {}".format(
timestamp, self._hyperparameters))
initial_conditions = self._compute_initial_conditions(
obstacle_list)
self._logger.debug("@{}: Initial conditions: {}".format(
timestamp, initial_conditions))
path_x, path_y, _, success = apply_hybrid_astar(
initial_conditions, self._hyperparameters)
if success:
self._logger.debug(
"@{}: Hybrid A* succeeded".format(timestamp))
speeds = [self._flags.target_speed] * len(path_x)
self._logger.debug("@{}: Hybrid A* Path X: {}".format(
timestamp, path_x.tolist()))
self._logger.debug("@{}: Hybrid A* Path Y: {}".format(
timestamp, path_y.tolist()))
self._logger.debug("@{}: Hybrid A* Speeds: {}".format(
timestamp, speeds))
output_wps = self.build_output_waypoints(
path_x, path_y, speeds)
else:
self._logger.error("@{}: Hybrid A* failed. "
"Sending emergency stop.".format(timestamp))
output_wps = self._world.follow_waypoints(0)
return output_wps
def _compute_initial_conditions(self, obstacles):
ego_transform = self._world.ego_transform
start = np.array([
ego_transform.location.x,
ego_transform.location.y,
np.deg2rad(ego_transform.rotation.yaw),
])
self._world.waypoints.remove_completed(ego_transform.location)
end_index = min(self._flags.num_waypoints_ahead,
len(self._world.waypoints.waypoints) - 1)
if end_index < 0:
# If no more waypoints left. Then our location is our end wp.
self._logger.debug("@{}: No more waypoints left")
end_wp = ego_transform
else:
end_wp = self._world.waypoints.waypoints[end_index]
end = np.array([
end_wp.location.x, end_wp.location.y,
np.deg2rad(ego_transform.rotation.yaw)
])
initial_conditions = {
"start": start,
"end": end,
"obs": obstacles,
}
return initial_conditions
|
75899
|
import os
import numpy as np
import matplotlib.pyplot as plt
import pyvips as Vips
NP_DTYPE_TO_VIPS_FORMAT = {
np.dtype('int8'): Vips.BandFormat.CHAR,
np.dtype('uint8'): Vips.BandFormat.UCHAR,
np.dtype('int16'): Vips.BandFormat.SHORT,
np.dtype('uint16'): Vips.BandFormat.USHORT,
np.dtype('int32'): Vips.BandFormat.INT,
np.dtype('float32'): Vips.BandFormat.FLOAT,
np.dtype('float64'): Vips.BandFormat.DOUBLE
}
VIPS_FORMAT_TO_NP_DTYPE = {v:k for k, v in NP_DTYPE_TO_VIPS_FORMAT.items()}
def array_vips(vips_image, verbose=False):
# dtype = np.dtype('u{}'.format(vips_image.BandFmt.bit_length() + 1))
dtype = VIPS_FORMAT_TO_NP_DTYPE[vips_image.format]
if verbose:
print(dtype, vips_image.height, vips_image.width, vips_image.bands)
    return (np.frombuffer(vips_image.write_to_memory(), dtype=dtype)  # np.uint8
.reshape(vips_image.height, vips_image.width, vips_image.bands))
def show_vips(vips_image, ax=plt, show=True, verbose=False):
if not isinstance(vips_image, Vips.Image):
return -1
im_np = array_vips(vips_image)
if verbose:
print(im_np.shape)
if vips_image.bands == 1:
ax.imshow(im_np.squeeze()/np.max(im_np), cmap=plt.get_cmap('gist_ncar'))
elif vips_image.bands == 2:
im_np = im_np[:,:,1]
ax.imshow(im_np/np.max(im_np), cmap=plt.get_cmap('gray'))
else:
ax.imshow(im_np)
if show:
plt.show()
def image_fields_dict(im_with_fields):
return {k:im_with_fields.get(k)
for k in im_with_fields.get_fields()
if im_with_fields.get_typeof(k)}
# from https://github.com/jcupitt/libvips/blob/master/doc/Examples.md
NP_DTYPE_TO_VIPS_FORMAT = {
np.dtype('int8'): Vips.BandFormat.CHAR,
np.dtype('uint8'): Vips.BandFormat.UCHAR,
np.dtype('int16'): Vips.BandFormat.SHORT,
np.dtype('uint16'): Vips.BandFormat.USHORT,
np.dtype('int32'): Vips.BandFormat.INT,
np.dtype('float32'): Vips.BandFormat.FLOAT,
np.dtype('float64'): Vips.BandFormat.DOUBLE
}
VIPS_FORMAT_TO_NP_DTYPE = {v:k for k, v in NP_DTYPE_TO_VIPS_FORMAT.items()}
def array_vips(vips_image, verbose=False):
# dtype = np.dtype('u{}'.format(vips_image.BandFmt.bit_length() + 1))
dtype = VIPS_FORMAT_TO_NP_DTYPE[vips_image.format]
if verbose:
print(dtype, vips_image.height, vips_image.width, vips_image.bands)
    return (np.frombuffer(vips_image.write_to_memory(), dtype=dtype)  # np.uint8
.reshape(vips_image.height, vips_image.width, vips_image.bands)).squeeze()
def show_vips(vips_image, ax=plt, show=True, verbose=False):
if not isinstance(vips_image, Vips.Image):
return -1
im_np = array_vips(vips_image)
if verbose:
print(im_np.shape)
if vips_image.bands == 1:
ax.imshow(im_np/np.max(im_np), cmap=plt.get_cmap('gist_ncar'))
elif vips_image.bands == 2:
im_np = im_np[:,:,1]
ax.imshow(im_np/np.max(im_np), cmap=plt.get_cmap('gray'))
else:
ax.imshow(im_np)
if show:
plt.show()
def image_fields_dict(im_with_fields):
return {k:im_with_fields.get(k)
for k in im_with_fields.get_fields()
if im_with_fields.get_typeof(k)}
def save_and_tile(image_to_segment, output_dir, tile_size=1536):
basename = os.path.basename(image_to_segment.filename)
base_dir_name = os.path.join(output_dir, basename.split('.svs')[0])
if not os.path.exists(base_dir_name):
os.makedirs(base_dir_name)
Vips.Image.dzsave(image_to_segment, base_dir_name,
layout='google',
suffix='.jpg[Q=90]',
tile_size=tile_size,
depth='one',
properties=True)
return None
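
# Minimal usage sketch (hypothetical file names; assumes an openslide-readable
# .svs input): open a whole-slide image and write Google-layout JPEG tiles.
if __name__ == '__main__':
    slide = Vips.Image.new_from_file('example_slide.svs')
    save_and_tile(slide, 'tiled_output')
    show_vips(slide.resize(0.01))  # quick low-resolution preview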
|
75908
|
from torch import nn
import torch.nn.functional as F
from ssd.modeling import registry
from ssd.modeling.anchors.prior_box import PriorBox
from ssd.modeling.box_head.box_predictor import make_box_predictor
from ssd.utils import box_utils
from .inference import PostProcessor
from .loss import MultiBoxLoss, FocalLoss
@registry.BOX_HEADS.register('SSDBoxHead')
class SSDBoxHead(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.predictor = make_box_predictor(cfg)
#
if self.cfg.MODEL.BOX_HEAD.LOSS == 'FocalLoss':
self.loss_evaluator = FocalLoss(0.25, 2)
else: # By default, we use MultiBoxLoss
self.loss_evaluator = MultiBoxLoss(neg_pos_ratio=cfg.MODEL.NEG_POS_RATIO)
self.post_processor = PostProcessor(cfg)
self.priors = None
def forward(self, features, targets=None):
cls_logits, bbox_pred = self.predictor(features)
if self.training:
return self._forward_train(cls_logits, bbox_pred, targets)
else:
return self._forward_test(cls_logits, bbox_pred)
def _forward_train(self, cls_logits, bbox_pred, targets):
gt_boxes, gt_labels = targets['boxes'], targets['labels']
reg_loss, cls_loss = self.loss_evaluator(cls_logits, bbox_pred, gt_labels, gt_boxes)
loss_dict = dict(
reg_loss=reg_loss,
cls_loss=cls_loss,
)
detections = (cls_logits, bbox_pred)
return detections, loss_dict
def _forward_test(self, cls_logits, bbox_pred):
if self.priors is None:
self.priors = PriorBox(self.cfg)().to(bbox_pred.device)
#
if self.cfg.MODEL.BOX_HEAD.LOSS == 'FocalLoss':
scores = cls_logits.sigmoid()
else:
scores = F.softmax(cls_logits, dim=2)
boxes = box_utils.convert_locations_to_boxes(
bbox_pred, self.priors, self.cfg.MODEL.CENTER_VARIANCE, self.cfg.MODEL.SIZE_VARIANCE
)
boxes = box_utils.center_form_to_corner_form(boxes)
detections = (scores, boxes)
detections = self.post_processor(detections)
return detections, {}
|
75940
|
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
from urllib.parse import quote
import requests
from pypuppetdb.errors import (APIError, EmptyResponseError)
log = logging.getLogger(__name__)
ENDPOINTS = {
'facts': 'pdb/query/v4/facts',
'fact-names': 'pdb/query/v4/fact-names',
'nodes': 'pdb/query/v4/nodes',
'resources': 'pdb/query/v4/resources',
'catalogs': 'pdb/query/v4/catalogs',
'mbean': 'metrics/v1/mbeans',
# metrics v2 endpoint is now the jolokia library and all of its operations
# https://jolokia.org/reference/html/protocol.html#jolokia-operations
'metrics': 'metrics/v2/read',
'metrics-base': 'metrics/v2',
'metrics-exec': 'metrics/v2/exec',
'metrics-list': 'metrics/v2/list',
'metrics-search': 'metrics/v2/search',
'metrics-write': 'metrics/v2/write',
'metrics-version': 'metrics/v2/version',
'reports': 'pdb/query/v4/reports',
'events': 'pdb/query/v4/events',
'event-counts': 'pdb/query/v4/event-counts',
'aggregate-event-counts': 'pdb/query/v4/aggregate-event-counts',
'server-time': 'pdb/meta/v1/server-time',
'version': 'pdb/meta/v1/version',
'environments': 'pdb/query/v4/environments',
'factsets': 'pdb/query/v4/factsets',
'fact-paths': 'pdb/query/v4/fact-paths',
'fact-contents': 'pdb/query/v4/fact-contents',
'edges': 'pdb/query/v4/edges',
'pql': 'pdb/query/v4',
'inventory': 'pdb/query/v4/inventory',
'status': 'status/v1/services/puppetdb-status',
'cmd': 'pdb/cmd/v1'
}
PARAMETERS = {
'order_by': 'order_by',
'include_total': 'include_total',
'count_by': 'count_by',
'counts_filter': 'counts_filter',
'summarize_by': 'summarize_by',
'server_time': 'server_time',
}
COMMAND_VERSION = {
"deactivate node": 3,
"replace catalog": 9,
"replace facts": 5,
"store report": 8
}
ERROR_STRINGS = {
'timeout': 'Connection to PuppetDB timed out on',
'refused': 'Could not reach PuppetDB on',
}
class BaseAPI(object):
"""This is a Base or Abstract class and is not meant to be instantiated
or used directly.
The BaseAPI object defines a set of methods that can be
reused across different versions of the PuppetDB API. If querying for a
certain resource is done in an identical fashion across different versions
it will be implemented here and should be overridden in their respective
versions if they deviate.
If :attr:`ssl` is set to `True` but either :attr:`ssl_key` or\
:attr:`ssl_cert` are `None` this will raise an error.
:param host: (optional) Hostname or IP of PuppetDB.
:type host: :obj:`string`
:param port: (optional) Port on which to talk to PuppetDB.
:type port: :obj:`int`
:param ssl_verify: (optional) Verify PuppetDB server certificate.
:type ssl_verify: :obj:`bool` or :obj:`string`
:param ssl_key: (optional) Path to our client secret key.
:type ssl_key: :obj:`None` or :obj:`string` representing a filesystem\
path.
:param ssl_cert: (optional) Path to our client certificate.
:type ssl_cert: :obj:`None` or :obj:`string` representing a filesystem\
path.
:param timeout: (optional) Number of seconds to wait for a response.
:type timeout: :obj:`int`
:param protocol: (optional) Explicitly specify the protocol to be used
(especially handy when using HTTPS with ssl_verify=False and
without certs)
:type protocol: :obj:`None` or :obj:`string`
:param url_path: (optional) The URL path where PuppetDB is served
(if not at the root / path)
:type url_path: :obj:`None` or :obj:`string`
:param username: (optional) The username to use for HTTP basic
authentication
:type username: :obj:`None` or :obj:`string`
:param password: (optional) The password to use for HTTP basic
authentication
:type password: :obj:`None` or :obj:`string`
:param token: (optional) The X-auth token to use for X-Authentication
:type token: :obj:`None` or :obj:`string`
:param metric_api_version: (Default 'v2') Version of the metric API we're initialising.
:type metric_api_version: :obj:`None` or :obj:`string`
:raises: :class:`~pypuppetdb.errors.ImproperlyConfiguredError`
"""
def __init__(self, host='localhost', port=8080, ssl_verify=True,
ssl_key=None, ssl_cert=None, timeout=10, protocol=None,
                 url_path=None, username=None, password=None, token=None,
metric_api_version=None):
"""Initialises our BaseAPI object passing the parameters needed in
order to be able to create the connection strings, set up SSL and
timeouts and so forth."""
self.api_version = 'v4'
if metric_api_version is not None and metric_api_version not in ['v1', 'v2']:
raise ValueError("metric_api_version specified must be None, 'v1' or 'v2',"
" was given: '{}'".format(metric_api_version))
self.metric_api_version = metric_api_version if metric_api_version else 'v2'
self.host = host
self.port = port
self.ssl_verify = ssl_verify
self.ssl_key = ssl_key
self.ssl_cert = ssl_cert
self.timeout = timeout
        self.token = token
        # total number of results of the last query, populated by
        # _make_request() when include_total is used
        self.last_total = None
# Standardise the URL path to a format similar to /puppetdb
if url_path:
if not url_path.startswith('/'):
url_path = '/' + url_path
if url_path.endswith('/'):
url_path = url_path[:-1]
else:
url_path = ''
self.url_path = url_path
self.session = requests.Session()
if username and password:
self.session.auth = (username, password)
self.session.headers = {
'content-type': 'application/json',
'accept': 'application/json',
'accept-charset': 'utf-8'
}
if self.token:
self.session.headers['X-Authentication'] = self.token
if protocol is not None:
protocol = protocol.lower()
if protocol not in ['http', 'https']:
raise ValueError('Protocol specified must be http or https')
self.protocol = protocol
elif self.ssl_key is not None and self.ssl_cert is not None:
self.protocol = 'https'
elif self.token is not None:
self.protocol = 'https'
else:
self.protocol = 'http'
def disconnect(self):
"""Close all connections that this class opened up."""
# If we don't explicitly close connections, we might cause other
# functions or libraries to hang on the open connections. This happens
# for example with using paramiko to tunnel PuppetDB connections
# through ssh.
self.session.close()
def __enter__(self):
"""Set up environment for 'with' statement."""
# Once this class has been instantiated, there's nothing more required
return self
def __exit__(self, type, value, trace):
"""Tear down connections."""
self.disconnect()
@property
def version(self):
"""The version of the API we're querying against.
:returns: Current API version.
:rtype: :obj:`string`"""
return self.api_version
@property
def base_url(self):
"""A base_url that will be used to construct the final
URL we're going to query against.
:returns: A URL of the form: ``proto://host:port``.
:rtype: :obj:`string`
"""
return '{proto}://{host}:{port}{url_path}'.format(
proto=self.protocol,
host=self.host,
port=self.port,
url_path=self.url_path,
)
@property
def total(self):
"""The total-count of the last request to PuppetDB
if enabled as parameter in _query method
:returns Number of total results
:rtype :obj:`int`
"""
if self.last_total is not None:
return int(self.last_total)
@staticmethod
def _normalize_resource_type(type_):
"""Normalizes the type passed to the api by capitalizing each part
of the type. For example:
sysctl::value -> Sysctl::Value
user -> User
"""
return '::'.join([s.capitalize() for s in type_.split('::')])
def _url(self, endpoint, path=None):
"""The complete URL we will end up querying. Depending on the
endpoint we pass in this will result in different URL's with
different prefixes.
:param endpoint: The PuppetDB API endpoint we want to query.
:type endpoint: :obj:`string`
:param path: An additional path if we don't wish to query the\
bare endpoint.
:type path: :obj:`string`
:returns: A URL constructed from :func:`base_url` with the\
            appropriate API version/prefix and the rest of the path added\
to it.
:rtype: :obj:`string`
"""
log.debug('_url called with endpoint: {0} and path: {1}'.format(
endpoint, path))
try:
endpoint = ENDPOINTS[endpoint]
except KeyError:
# If we reach this we're trying to query an endpoint that doesn't
# exist. This shouldn't happen unless someone made a booboo.
raise APIError
url = '{base_url}/{endpoint}'.format(
base_url=self.base_url,
endpoint=endpoint,
)
if path is not None:
url = '{0}/{1}'.format(url, quote(path))
return url
def _query(self, endpoint=None, path=None, query=None,
order_by=None, limit=None, offset=None, include_total=False,
summarize_by=None, count_by=None, count_filter=None,
payload=None, request_method='GET'):
"""This method prepares a non-PQL query to PuppetDB. Actual making
the HTTP request is done by _make_request().
:param endpoint: (optional) The PuppetDB API endpoint we want to query.
Unnecessary when using PQL.
:type endpoint: :obj:`string`
:param path: An additional path if we don't wish to query the\
bare endpoint.
:type path: :obj:`string`
:param query: (optional) An AST query to further narrow down the resultset.
:type query: :obj:`string`
:param order_by: (optional) Set the order parameters for the resultset.
:type order_by: :obj:`string`
        :param limit: (optional) Tell PuppetDB to limit its response to this\
            number of objects.
        :type limit: :obj:`int`
        :param offset: (optional) Tell PuppetDB to start its response from\
            the given offset. This is useful for implementing pagination\
            but is not supported just yet.
        :type offset: :obj:`string`
        :param include_total: (optional) Include the total number of results
        :type include_total: :obj:`bool`
:param summarize_by: (optional) Specify what type of object you'd like\
to see counts at the event-counts and aggregate-event-counts \
endpoints
:type summarize_by: :obj:`string`
:param count_by: (optional) Specify what type of object is counted
:type count_by: :obj:`string`
:param count_filter: (optional) Specify a filter for the results
:type count_filter: :obj:`string`
:param payload: (optional) Arbitrary payload to send as part of the request.
:type payload: :obj:`dict`
:raises: :class:`~pypuppetdb.errors.EmptyResponseError`
:returns: The decoded response from PuppetDB
:rtype: :obj:`dict` or :obj:`list`
"""
# inside the list comprehension the locals()'s value changes
# so we need to make a copy of the function's local() to use it there
function_locals = locals().copy()
log.debug("_query called with: " +
# comma-separated list of method arguments with their values
", ".join([f"{arg}={function_locals.get(arg, 'None')}"
for arg in function_locals.keys() if arg != 'self'])
)
if not endpoint:
log.error("Endpoint is required!")
raise APIError
if payload is None:
payload = {}
url = self._url(endpoint, path=path)
if query is not None:
payload['query'] = query
if order_by is not None:
payload[PARAMETERS['order_by']] = order_by
if limit is not None:
payload['limit'] = limit
if include_total is True:
payload[PARAMETERS['include_total']] = \
json.dumps(include_total)
if offset is not None:
payload['offset'] = offset
if summarize_by is not None:
payload[PARAMETERS['summarize_by']] = summarize_by
if count_by is not None:
payload[PARAMETERS['count_by']] = count_by
if count_filter is not None:
payload[PARAMETERS['counts_filter']] = count_filter
return self._make_request(url, request_method, payload)
def _make_request(self, url, request_method, payload):
"""
Makes a GET or POST HTTP request to PuppetDB. If PuppetDB can be
reached and answers within the timeout we'll decode the response
and give it back or raise for the HTTP Status Code PuppetDB gave back.
:param url: Complete URL to call
:param request_method: GET or POST
:param payload: data to send as parameters (GET) or in the body (POST)
:return: response body as JSON
or raises an EmptyResponseError exception if it's empty
"""
if request_method.upper() not in ['GET', 'POST']:
log.error(f"Only GET or POST supported, {request_method} unsupported")
raise APIError
try:
if request_method.upper() == 'GET':
r = self.session.get(url, params=payload,
verify=self.ssl_verify,
cert=(self.ssl_cert, self.ssl_key),
timeout=self.timeout,
)
else:
r = self.session.post(url,
data=json.dumps(payload, default=str),
verify=self.ssl_verify,
cert=(self.ssl_cert, self.ssl_key),
timeout=self.timeout,
)
r.raise_for_status()
# get total number of results if requested with include-total
# just a quick hack - needs improvement
if 'X-Records' in r.headers:
self.last_total = r.headers['X-Records']
else:
self.last_total = None
json_body = r.json()
if json_body is not None:
return json_body
else:
del json_body
raise EmptyResponseError
except requests.exceptions.Timeout:
log.error("{0} {1}:{2} over {3}.".format(ERROR_STRINGS['timeout'],
self.host, self.port,
self.protocol.upper()))
raise
except requests.exceptions.ConnectionError:
log.error("{0} {1}:{2} over {3}.".format(ERROR_STRINGS['refused'],
self.host, self.port,
self.protocol.upper()))
raise
except requests.exceptions.HTTPError as err:
log.error("{0} {1}:{2} over {3}.".format(err.response.text,
self.host, self.port,
self.protocol.upper()))
raise
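
# Minimal usage sketch (hypothetical host and certificate paths); with a client
# certificate configured the protocol is inferred as HTTPS.
if __name__ == '__main__':
    api = BaseAPI(host='puppetdb.example.com', port=8081,
                  ssl_cert='/path/to/client.pem', ssl_key='/path/to/client.key')
    print(api.base_url)  # -> https://puppetdb.example.com:8081
    # nodes = api._query('nodes', limit=5)  # would issue GET pdb/query/v4/nodes
    api.disconnect()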
|
75988
|
from rllab.misc import logger
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import os.path as osp
import numpy as np
import math
import random
def line_intersect(pt1, pt2, ptA, ptB):
"""
Taken from https://www.cs.hmc.edu/ACM/lectures/intersections.html
this returns the intersection of Line(pt1,pt2) and Line(ptA,ptB)
returns a tuple: (xi, yi, valid, r, s), where
(xi, yi) is the intersection
r is the scalar multiple such that (xi,yi) = pt1 + r*(pt2-pt1)
    s is the scalar multiple such that (xi,yi) = ptA + s*(ptB-ptA)
valid == 0 if there are 0 or inf. intersections (invalid)
valid == 1 if it has a unique intersection ON the segment
"""
DET_TOLERANCE = 0.00000001
# the first line is pt1 + r*(pt2-pt1)
# in component form:
x1, y1 = pt1
x2, y2 = pt2
dx1 = x2 - x1
dy1 = y2 - y1
# the second line is ptA + s*(ptB-ptA)
x, y = ptA
xB, yB = ptB
dx = xB - x
dy = yB - y
# we need to find the (typically unique) values of r and s
# that will satisfy
#
# (x1, y1) + r(dx1, dy1) = (x, y) + s(dx, dy)
#
# which is the same as
#
# [ dx1 -dx ][ r ] = [ x-x1 ]
# [ dy1 -dy ][ s ] = [ y-y1 ]
#
# whose solution is
#
# [ r ] = _1_ [ -dy dx ] [ x-x1 ]
# [ s ] = DET [ -dy1 dx1 ] [ y-y1 ]
#
# where DET = (-dx1 * dy + dy1 * dx)
#
# if DET is too small, they're parallel
#
DET = (-dx1 * dy + dy1 * dx)
if math.fabs(DET) < DET_TOLERANCE: return (0, 0, 0, 0, 0)
# now, the determinant should be OK
DETinv = 1.0 / DET
# find the scalar amount along the "self" segment
r = DETinv * (-dy * (x - x1) + dx * (y - y1))
# find the scalar amount along the input line
s = DETinv * (-dy1 * (x - x1) + dx1 * (y - y1))
# return the average of the two descriptions
xi = (x1 + r * dx1 + x + s * dx) / 2.0
yi = (y1 + r * dy1 + y + s * dy) / 2.0
return (xi, yi, 1, r, s)
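    # worked example: the diagonals of a 2x2 square cross at their midpoints,
    # line_intersect((0, 0), (2, 2), (0, 2), (2, 0)) == (1.0, 1.0, 1, 0.5, 0.5)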
def ray_segment_intersect(ray, segment):
"""
Check if the ray originated from (x, y) with direction theta intersects the line segment (x1, y1) -- (x2, y2),
and return the intersection point if there is one
"""
(x, y), theta = ray
# (x1, y1), (x2, y2) = segment
pt1 = (x, y)
    ray_len = 1  # avoid shadowing the builtin len()
    pt2 = (x + ray_len * math.cos(theta), y + ray_len * math.sin(theta))
xo, yo, valid, r, s = line_intersect(pt1, pt2, *segment)
if valid and r >= 0 and 0 <= s <= 1:
return (xo, yo)
return None
def point_distance(p1, p2):
x1, y1 = p1
x2, y2 = p2
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def construct_maze(maze_id=0, length=1):
# define the maze to use
if maze_id == 0:
if length != 1:
raise NotImplementedError("Maze_id 0 only has length 1!")
structure = [
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 0, 1],
[1, 'r', 0, 'g', 1],
[1, 1, 1, 1, 1],
]
# structure = [
# [0, 0, 0, 0, 0],
# [0, 'r', 0, 0, 0],
# [0, 0, 0, 0, 0],
# [0, 0, 0, 'g', 0],
# [0, 0, 0, 0, 0],
# ]
# structure = [
# [1, 1, 1, 1, 1, 1, 1],
# [1, 0, 0, 0, 0, 'g', 1],
# [1, 0, 0, 0, 0, 0, 1],
# [1, 'r', 0, 0, 0, 0, 1],
# [1, 1, 1, 1, 1, 1, 1],
# ]
elif maze_id == 1: # donuts maze: can reach the single goal by 2 equal paths
c = length + 4
M = np.ones((c, c))
M[1:c - 1, (1, c - 2)] = 0
M[(1, c - 2), 1:c - 1] = 0
M = M.astype(int).tolist()
M[1][c // 2] = 'r'
M[c - 2][c // 2] = 'g'
structure = M
elif maze_id == 2: # spiral maze: need to use all the keys (only makes sense for length >=3)
c = length + 4
M = np.ones((c, c))
M[1:c - 1, (1, c - 2)] = 0
M[(1, c - 2), 1:c - 1] = 0
M = M.astype(int).tolist()
M[1][c // 2] = 'r'
# now block one of the ways and put the goal on the other side
M[1][c // 2 - 1] = 1
M[1][c // 2 - 2] = 'g'
structure = M
elif maze_id == 3: # corridor with goals at the 2 extremes
structure = [
[1] * (2 * length + 5),
[1, 'g'] + [0] * length + ['r'] + [0] * length + ['g', 1],
[1] * (2 * length + 5),
]
    elif 4 <= maze_id <= 7: # cross corridor, goal in one of the four corners (selected by maze_id)
c = 2 * length + 5
M = np.ones((c, c))
M = M - np.diag(np.ones(c))
M = M - np.diag(np.ones(c - 1), 1) - np.diag(np.ones(c - 1), -1)
i = np.arange(c)
j = i[::-1]
M[i, j] = 0
M[i[:-1], j[1:]] = 0
M[i[1:], j[:-1]] = 0
M[np.array([0, c - 1]), :] = 1
M[:, np.array([0, c - 1])] = 1
M = M.astype(int).tolist()
M[c // 2][c // 2] = 'r'
if maze_id == 4:
M[1][1] = 'g'
if maze_id == 5:
M[1][c - 2] = 'g'
if maze_id == 6:
M[c - 2][1] = 'g'
if maze_id == 7:
M[c - 2][c - 2] = 'g'
structure = M
    elif maze_id == 8: # reflection of benchmark maze
structure = [
[1, 1, 1, 1, 1],
[1, 'g', 0, 0, 1],
[1, 1, 1, 0, 1],
[1, 'r', 0, 0, 1],
[1, 1, 1, 1, 1],
]
elif maze_id == 9: # sym benchmark maze
structure = [
[1, 1, 1, 1, 1],
[1, 0, 0, 'r', 1],
[1, 0, 1, 1, 1],
[1, 0, 0, 'g', 1],
[1, 1, 1, 1, 1],
]
    elif maze_id == 10: # reflection of sym of benchmark maze
structure = [
[1, 1, 1, 1, 1],
[1, 0, 0, 'g', 1],
[1, 0, 1, 1, 1],
[1, 0, 0, 'r', 1],
[1, 1, 1, 1, 1],
]
elif maze_id == 11: # four room
structure = [
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1],
[1,'r',0,'g',1, 0, 0, 0, 1],
[1, 1, 0, 1, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 1, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
]
if structure:
return structure
else:
raise NotImplementedError("The provided MazeId is not recognized")
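# Added illustration (not part of the original module): maze_id 3 with length=1
# is a 3x7 corridor with the robot in the middle cell and a goal at each end.
def _construct_maze_example():
    structure = construct_maze(maze_id=3, length=1)
    assert len(structure) == 3 and len(structure[0]) == 7
    assert structure[1][3] == 'r'
    assert structure[1][1] == 'g' and structure[1][5] == 'g'
    return structure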
def construct_maze_random(maze_id=0, length=1): # random start, only correct for maze_id == 8 for now!
    x_relative = y_relative = 0 # for random start position
    pool, p, structure = None, None, None # defaults; not every maze_id below defines a random-start pool
# define the maze to use
if maze_id == 0:
if length != 1:
raise NotImplementedError("Maze_id 0 only has length 1!")
structure = [
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 0, 1],
[1, 'r', 0, 'g', 1],
[1, 1, 1, 1, 1],
]
elif maze_id == 1: # donuts maze: can reach the single goal by 2 equal paths
c = length + 4
M = np.ones((c, c))
M[1:c - 1, (1, c - 2)] = 0
M[(1, c - 2), 1:c - 1] = 0
M = M.astype(int).tolist()
M[1][c // 2] = 'r'
M[c - 2][c // 2] = 'g'
structure = M
elif maze_id == 2: # spiral maze: need to use all the keys (only makes sense for length >=3)
c = length + 4
M = np.ones((c, c))
M[1:c - 1, (1, c - 2)] = 0
M[(1, c - 2), 1:c - 1] = 0
M = M.astype(int).tolist()
M[1][c // 2] = 'r'
# now block one of the ways and put the goal on the other side
M[1][c // 2 - 1] = 1
M[1][c // 2 - 2] = 'g'
structure = M
elif maze_id == 3: # corridor with goals at the 2 extremes
structure = [
[1] * (2 * length + 5),
[1, 'g'] + [0] * length + ['r'] + [0] * length + ['g', 1],
[1] * (2 * length + 5),
]
    elif 4 <= maze_id <= 7: # cross corridor, goal in one of the four corners (selected by maze_id)
c = 2 * length + 5
M = np.ones((c, c))
M = M - np.diag(np.ones(c))
M = M - np.diag(np.ones(c - 1), 1) - np.diag(np.ones(c - 1), -1)
i = np.arange(c)
j = i[::-1]
M[i, j] = 0
M[i[:-1], j[1:]] = 0
M[i[1:], j[:-1]] = 0
M[np.array([0, c - 1]), :] = 1
M[:, np.array([0, c - 1])] = 1
M = M.astype(int).tolist()
M[c // 2][c // 2] = 'r'
if maze_id == 4:
M[1][1] = 'g'
if maze_id == 5:
M[1][c - 2] = 'g'
if maze_id == 6:
M[c - 2][1] = 'g'
if maze_id == 7:
M[c - 2][c - 2] = 'g'
structure = M
elif maze_id == 8: # benchmark maze
        # no random seed is set here; not necessary?
# random.seed(1)
pool = [(1, 2), (1, 3), (2, 3), (3, 3), (3, 2), (3, 1)]
random_i = np.random.choice(len(pool), p=[0.07, 0.08, 0.11, 0.14, 0.2, 0.4])
x_r, y_r = pool[random_i]
# (3, 1) was the initial choice!
structure = [
[1, 1, 1, 1, 1],
[1, 'g', 0, 0, 1],
[1, 1, 1, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1],
]
x_r, y_r = 3, 1 # fix start, only for the maze
x_g, y_g = 1, 1 # useless for this env
structure[x_r][y_r] = 'r'
x_relative = x_r - 3 # the x index relative to (0, 0)
y_relative = y_r - 1
pool = [(1, 2), (1, 3), (2, 3), (3, 3), (3, 2), (3, 1)] # random pool
p = [0.07, 0.08, 0.11, 0.14, 0.2, 0.4] # choice probability from pool
elif maze_id == 9: # mirrored maze, for transfer
structure = [
[1, 1, 1, 1, 1],
[1, 0, 0, 'g', 1],
[1, 0, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1],
]
x_r, y_r = 3, 1 # fix start, only for the maze
x_g, y_g = 1, 3 # useless for this env
structure[x_r][y_r] = 'r'
# print("x_r, y_r", x_r, y_r)
x_relative = x_r - 3 # the x index relative to (0, 0)
y_relative = y_r - 1
pool = [(1, 2), (1, 1), (2, 1), (3, 1), (3, 2), (3, 3)]
        p = [0.07, 0.08, 0.11, 0.14, 0.2, 0.4] # hand-crafted probabilities: higher probability for starting points farther from the goal
elif maze_id == 11:
structure = [
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1],
[1,'r',0,'g',1, 0, 0, 0, 1],
[1, 1, 0, 1, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 1, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
]
x_r, y_r = 3, 1
x_g, y_g = 3, 3 # this is hard-coded in maze_env.py!
x_relative = 0 # x_r, y_r relative to a fixed point
y_relative = 0
pool = [(1, 1), (1, 2), (1, 3), (1, 5), (1, 6), (1, 7),
(2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
(3, 1), (3, 2), (3, 3), (3, 5), (3, 6), (3, 7),
(4, 2), (4, 5), (4, 6), (4, 7),
(5, 1), (5, 2), (5, 3), (5, 6),
(6, 1), (6, 2), (6, 3), (6, 5), (6, 6), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6), (7, 7),
(8, 1), (8, 2), (8, 3), (8, 5), (8, 6), (8, 7),
] # random pool for r or g
p = [0.02174] * 45 + [0.0217] # choice probability from pool
elif maze_id == 12: # a maze that deviates from the original maze a little (to test its transferability)
structure = [ # a spiral matrix
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 'g', 1, 0, 1],
[1, 1, 1, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1]
]
x_r, y_r = 4, 1
# x_g, y_g = 2, 1 # these are hard-coded in maze_env.py! need to modify!!!
structure[x_r][y_r] = 'r'
x_relative = 1 # x_r, y_r relative to (3, 1)
y_relative = 0
pool = [(1, 1), (1, 2), (1, 3), (2, 3), (3, 3), (4, 3), (4, 2), (4, 1)] # random pool for r or g
p = [0.028, 0.056, 0.083, 0.11, 0.139, 0.17, 0.194, 0.22]
elif maze_id == 14: # a maze that deviates from the original maze a little (to test its transferability)
structure = [ # a mirrored spiral matrix
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 0, 1, 'g', 1],
[1, 0, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1]
]
x_r, y_r = 4, 3
structure[x_r][y_r] = 'r'
x_relative = 1 # x_r, y_r relative to (3, 1)
y_relative = 2
pool = [(1, 3), (1, 2), (1, 1), (2, 1), (3, 1), (4, 1), (4, 2), (4, 3)] # random pool for r or g
p = [0.028, 0.056, 0.083, 0.11, 0.139, 0.17, 0.194, 0.22]
elif maze_id == 13: # a maze that is elongated
structure = [
[1, 1, 1, 1, 1, 1, 1],
[1, 'g', 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1]
]
x_r, y_r = 5, 1
structure[x_r][y_r] = 'r'
x_relative = 2 # x_r, y_r relative to a fixed point (3, 1)
y_relative = 0
pool = [(1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (5, 4), (5, 3), (5, 2), (5, 1)] # random pool for r or g
p = [0.013, 0.026, 0.028, 0.051, 0.064, 0.077, 0.090, 0.103, 0.115, 0.128, 0.141, 0.164]
elif maze_id == 10: # reflexion of sym of benchmark maze
structure = [
[1, 1, 1, 1, 1],
[1, 0, 0, 'g', 1],
[1, 0, 1, 1, 1],
[1, 0, 0, 'r', 1],
[1, 1, 1, 1, 1],
]
if structure:
return pool, p, structure, x_relative, y_relative
else:
raise NotImplementedError("The provided MazeId is not recognized")
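# Added illustration (not part of the original module): for maze_id 8 the start
# is currently fixed at cell (3, 1), so the returned offsets are both zero and
# the random pool holds the six free (non-goal) cells with hand-crafted probabilities.
def _construct_maze_random_example():
    pool, p, structure, x_rel, y_rel = construct_maze_random(maze_id=8)
    assert structure[3][1] == 'r' and structure[1][1] == 'g'
    assert (x_rel, y_rel) == (0, 0)
    assert len(pool) == len(p) == 6
    return structure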
def plot_ray(self, reading, ray_idx, color='r'):
structure = self.MAZE_STRUCTURE
size_scaling = self.MAZE_SIZE_SCALING
# duplicate cells to plot the maze
structure_plot = np.zeros(((len(structure) - 1) * 2, (len(structure[0]) - 1) * 2))
for i in range(len(structure)):
for j in range(len(structure[0])):
cell = structure[i][j]
if type(cell) is not int:
cell = 0.3 if cell == 'r' else 0.7
if i == 0:
if j == 0:
structure_plot[i, j] = cell
elif j == len(structure[0]) - 1:
structure_plot[i, 2 * j - 1] = cell
else:
structure_plot[i, 2 * j - 1:2 * j + 1] = cell
elif i == len(structure) - 1:
if j == 0:
structure_plot[2 * i - 1, j] = cell
elif j == len(structure[0]) - 1:
structure_plot[2 * i - 1, 2 * j - 1] = cell
else:
structure_plot[2 * i - 1, 2 * j - 1:2 * j + 1] = cell
else:
if j == 0:
structure_plot[2 * i - 1:2 * i + 1, j] = cell
elif j == len(structure[0]) - 1:
structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1] = cell
else:
structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1:2 * j + 1] = cell
fig, ax = plt.subplots()
im = ax.pcolor(-np.array(structure_plot), cmap='gray', edgecolor='black', linestyle=':', lw=1)
x_labels = list(range(len(structure[0])))
y_labels = list(range(len(structure)))
    ax.grid(True) # eliminate this to avoid inner lines
ax.xaxis.set(ticks=2 * np.arange(len(x_labels)), ticklabels=x_labels)
ax.yaxis.set(ticks=2 * np.arange(len(y_labels)), ticklabels=y_labels)
robot_xy = np.array(self.wrapped_env.get_body_com("torso")[:2]) # the coordinates of this are wrt the init!!
ori = self.get_ori() # for Ant this is computed with atan2, which gives [-pi, pi]
# compute origin cell i_o, j_o coordinates and center of it x_o, y_o (with 0,0 in the top-right corner of struc)
o_xy = np.array(self._find_robot()) # this is self.init_torso_x, self.init_torso_y !!: center of the cell xy!
o_ij = (o_xy / size_scaling).astype(int) # this is the position in the grid (check if correct..)
o_xy_plot = o_xy / size_scaling * 2
robot_xy_plot = o_xy_plot + robot_xy / size_scaling * 2
plt.scatter(*robot_xy_plot)
# for ray_idx in range(self._n_bins):
length_wall = self._sensor_range - reading * self._sensor_range if reading else 1e-6
ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
if ray_ori > math.pi:
ray_ori -= 2 * math.pi
elif ray_ori < - math.pi:
ray_ori += 2 * math.pi
# find the end point wall
end_xy = (robot_xy + length_wall * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
end_xy_plot = (o_ij + end_xy / size_scaling) * 2
plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], color)
ax.set_title('sensors debug')
print('plotting now, close the window')
# plt.show(fig)
# plt.close()
def plot_state(self, name='sensors', state=None):
if state:
self.wrapped_env.reset(state)
structure = self.__class__.MAZE_STRUCTURE
size_scaling = self.__class__.MAZE_SIZE_SCALING
# duplicate cells to plot the maze
structure_plot = np.zeros(((len(structure) - 1) * 2, (len(structure[0]) - 1) * 2))
for i in range(len(structure)):
for j in range(len(structure[0])):
cell = structure[i][j]
if type(cell) is not int:
cell = 0.3 if cell == 'r' else 0.7
if i == 0:
if j == 0:
structure_plot[i, j] = cell
elif j == len(structure[0]) - 1:
structure_plot[i, 2 * j - 1] = cell
else:
structure_plot[i, 2 * j - 1:2 * j + 1] = cell
elif i == len(structure) - 1:
if j == 0:
structure_plot[2 * i - 1, j] = cell
elif j == len(structure[0]) - 1:
structure_plot[2 * i - 1, 2 * j - 1] = cell
else:
structure_plot[2 * i - 1, 2 * j - 1:2 * j + 1] = cell
else:
if j == 0:
structure_plot[2 * i - 1:2 * i + 1, j] = cell
elif j == len(structure[0]) - 1:
structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1] = cell
else:
structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1:2 * j + 1] = cell
fig, ax = plt.subplots()
im = ax.pcolor(-np.array(structure_plot), cmap='gray', edgecolor='black', linestyle=':', lw=1)
x_labels = list(range(len(structure[0])))
y_labels = list(range(len(structure)))
    ax.grid(True) # eliminate this to avoid inner lines
ax.xaxis.set(ticks=2 * np.arange(len(x_labels)), ticklabels=x_labels)
ax.yaxis.set(ticks=2 * np.arange(len(y_labels)), ticklabels=y_labels)
obs = self.get_current_maze_obs()
robot_xy = np.array(self.wrapped_env.get_body_com("torso")[:2]) # the coordinates of this are wrt the init
ori = self.get_ori() # for Ant this is computed with atan2, which gives [-pi, pi]
# compute origin cell i_o, j_o coordinates and center of it x_o, y_o (with 0,0 in the top-right corner of struc)
o_xy = np.array(self._find_robot()) # this is self.init_torso_x, self.init_torso_y: center of the cell xy!
o_ij = (o_xy / size_scaling).astype(int) # this is the position in the grid
o_xy_plot = o_xy / size_scaling * 2
robot_xy_plot = o_xy_plot + robot_xy / size_scaling * 2
plt.scatter(*robot_xy_plot)
for ray_idx in range(self._n_bins):
length_wall = self._sensor_range - obs[ray_idx] * self._sensor_range if obs[ray_idx] else 1e-6
ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
if ray_ori > math.pi:
ray_ori -= 2 * math.pi
elif ray_ori < - math.pi:
ray_ori += 2 * math.pi
# find the end point wall
end_xy = (robot_xy + length_wall * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
end_xy_plot = (o_ij + end_xy / size_scaling) * 2
plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], 'r')
length_goal = self._sensor_range - obs[ray_idx + self._n_bins] * self._sensor_range if obs[
ray_idx + self._n_bins] else 1e-6
ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
# find the end point goal
end_xy = (robot_xy + length_goal * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
end_xy_plot = (o_ij + end_xy / size_scaling) * 2
plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], 'g')
log_dir = logger.get_snapshot_dir()
ax.set_title('sensors: ' + name)
    plt.savefig(osp.join(log_dir, name + '_sensors.png')) # this saves the current figure (fig)
plt.close()
|
76008
|
import json
import sys
import csv
import argparse
import time
import subprocess
import pingparsing
# defined in parameters.py
from parameters import (
filters,
number_rules,
iterations,
iface,
bandwidth,
seed,
ipnets,
ping_interval,
ping_count,
)
from k8s import benchmark_pod_tmpl
from string import Template
parser = argparse.ArgumentParser(description="Test egress performance")
parser.add_argument("--username")
parser.add_argument("--client")
parser.add_argument("--server")
parser.add_argument("--mode")
args = parser.parse_args()
def get_pod(pod_name):
cmd = "kubectl get pod -n default {} -o jsonpath='{{.metadata.name}}'".format(pod_name)
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.stdout.decode("utf-8")
def delete_benchmark_pod(pod_name):
exists = get_pod(pod_name)
if exists == "":
return None
cmd = "kubectl delete pod -n default {}".format(pod_name)
result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
return result.stdout.decode("utf-8")
def pod_status(pod_name):
cmd = "kubectl get pod {} -o jsonpath='{{.status.phase}}'".format(pod_name)
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
return result.stdout.decode("utf-8")
def get_output(pod_name):
phase = pod_status(pod_name)
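    # poll until the pod reports Succeeded: up to 100 checks, 3 seconds apart (~5 minutes)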
for x in range(1,100):
if phase != "Succeeded":
time.sleep(3)
phase = pod_status(pod_name)
if phase == "Succeeded":
cmd = "kubectl logs -n default {}".format(pod_name)
result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
return result.stdout.decode("utf-8")
return None
def run_on_k8s(filter,nrules,cmd):
benchmark_cmd = Template(benchmark_pod_tmpl).substitute(count=nrules, iface=iface, seed=seed, ipnets=ipnets, filter=filter,cmd=cmd)
cmd = '''
kubectl apply -f - << EOF
{}
EOF'''.format(benchmark_cmd)
result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
return result.stdout.decode("utf-8")
benchmark_cmd_format = "sudo -E ./benchmark -count {count} -iface {iface} -seed {seed} -ipnets {ipnets} -filter {filter}"
def run_test(filter, nrules, cmd):
benchmark_cmd = benchmark_cmd_format.format(count=nrules, iface=iface, seed=seed, ipnets=ipnets, filter=filter)
cmd = "export BENCHMARK_COMMAND='{}' ; {}".format(cmd, benchmark_cmd)
return run_in_client(cmd)
iperf_cmd_format_for_k8s = "iperf3 -c {server} {mode_flags} -O 2 -t 10 -A 2 -J"
iperf_cmd_format = "docker run --name iperfclient --net=host networkstatic/iperf3 -c {server} {mode_flags} -O 2 -t 10 -A 2 -J"
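# iperf3 flags used above: -O 2 omits the first 2 seconds (warm-up) from the results,
# -t 10 runs for 10 seconds, -A 2 pins CPU affinity, -J emits JSON output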
def run_iperf_test(filter, nrules, mode):
flags = ""
if mode == "udp":
flags = "-u -l 1470 -b {}".format(bandwidth)
if filter == "calico" or filter == "cilium":
iperf_cmd = iperf_cmd_format_for_k8s.format(server=args.server, mode_flags=flags)
out = run_on_k8s(filter, nrules, iperf_cmd)
if not out:
return None
out = get_output("egress-benchmark")
if not out:
return None
else:
iperf_cmd = iperf_cmd_format.format(server=args.server, mode_flags=flags)
run_in_client("docker rm --force iperfclient || true")
out = run_test(filter, nrules, iperf_cmd)
if not out:
return None
#print("out is: " + out)
index = out.find("{")
if index == -1:
return None
j = json.loads(out[index:])
if mode == "udp":
key = "sum"
elif mode == "tcp":
key = "sum_received"
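    # iperf3 reports bits per second; divide by 1e9 to express throughput in Gbit/s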
throughput = float((j["end"][key]["bits_per_second"]))/(10**9)
cpu = float(j["end"]["cpu_utilization_percent"]["host_total"])
return (throughput, cpu)
ping_cmd_format = "ping -i {interval} -c {count} {dest}"
def run_ping_test(filter, nrules, mode):
cmd = ping_cmd_format.format(interval=ping_interval, count=ping_count, dest=args.server)
if filter == "calico" or filter == "cilium":
result = run_on_k8s(filter, nrules, cmd)
if not result:
return None
result = get_output("egress-benchmark")
if not result:
return None
else:
result = run_test(filter, nrules, cmd)
#print("result is ---" + result)
if not result:
return None
parser = pingparsing.PingParsing()
stats = parser.parse(result)
return stats.rtt_avg
def start_iperf_server():
run_in_server("docker rm --force iperfserver || true")
run_in_server("docker run --name iperfserver -d --net=host networkstatic/iperf3 -s -A 2")
time.sleep(2)
def copy_benchmark_to_client():
cmd = "scp benchmark {}@{}:".format(args.username, args.client)
subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
def run_in_client(cmd):
return run_over_ssh(args.client, cmd)
def run_in_server(cmd):
return run_over_ssh(args.server, cmd)
def run_over_ssh(host, cmd):
cmd_to_run = 'ssh {}@{} "{}"'.format(args.username, host, cmd)
result = subprocess.run(cmd_to_run, stdout=subprocess.PIPE, shell=True)
if result.returncode != 0:
return None
return result.stdout.decode("utf-8")
def write_csv(filename, data):
with open(filename, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter="\t",
quotechar=";", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Filter", "Rules"] + ["r"+str(i) for i in range(1, iterations+1)])
for (filter_name, filter_data) in data.items():
for (rules_number, results) in filter_data.items():
writer.writerow([filter_name, rules_number] + results)
# [filter][rules_number][run]
data_throughput = {}
data_cpu = {}
data_ping = {}
data_setup = {}
copy_benchmark_to_client()
start_iperf_server()
print("%\tfilter\tnrules\titeration\tthroughput\tcpu\tping\tsetup")
number_of_tests = len(filters)*len(number_rules)*iterations
number_of_tests_executed = 0
# run the tests and collect all the data
for (filter_index, filter) in enumerate(filters):
data_throughput[filter] = {}
data_cpu[filter] = {}
data_ping[filter] = {}
data_setup[filter] = {}
for (rules_index, n) in enumerate(number_rules):
data_throughput[filter][n] = []
data_cpu[filter][n] = []
data_ping[filter][n] = []
data_setup[filter][n] = []
for i in range(iterations):
percentage = 100.0*float(number_of_tests_executed)/number_of_tests
print("{:1.0f}\t{}\t{}\t{}\t".format(percentage, filter, n, i), end="")
if filter == "calico" or filter == "cilium":
delete_benchmark_pod("egress-benchmark")
out = run_iperf_test(filter, n, args.mode)
if not out:
print("Testing iperf for {}:{}:{} failed".format(filter, n, i))
continue
if filter == "calico" or filter == "cilium":
delete_benchmark_pod("egress-benchmark")
out_ping = run_ping_test(filter, n, args.mode)
if not out_ping:
print("Testing ping for {}:{}:{} failed".format(filter, n, i))
continue
out_ping = 1000*out_ping
if filter == "calico" or filter == "cilium":
delete_benchmark_pod("egress-benchmark")
setup = run_on_k8s(filter, n, "MEASURE_SETUP_TIME")
setup = get_output("egress-benchmark")
else:
setup = run_test(filter, n, "MEASURE_SETUP_TIME")
if not setup:
print("Testing setup for {}:{}:{} failed".format(filter, n, i))
continue
setup = int(setup)/1000
print("{}\t{}\t{}\t{}".format(out[0], out[1], out_ping, setup))
number_of_tests_executed += 1
data_throughput[filter][n].append(out[0])
data_cpu[filter][n].append(out[1])
data_ping[filter][n].append(out_ping)
data_setup[filter][n].append(setup)
write_csv("throughput.csv", data_throughput)
write_csv("cpu.csv", data_cpu)
write_csv("latency.csv", data_ping)
write_csv("setup.csv", data_setup)
|
76069
|
import os
from unittest import TestCase
# most of the features of this script are already tested indirectly when
# running vensim and xmile integration tests
_root = os.path.dirname(__file__)
class TestErrors(TestCase):
def test_canonical_file_not_found(self):
from pysd.tools.benchmarking import runner
with self.assertRaises(FileNotFoundError) as err:
runner(os.path.join(_root, "more-tests/not_existent.mdl"))
self.assertIn(
'Canonical output file not found.',
str(err.exception))
def test_non_valid_model(self):
from pysd.tools.benchmarking import runner
with self.assertRaises(ValueError) as err:
runner(os.path.join(
_root,
"more-tests/not_vensim/test_not_vensim.txt"))
self.assertIn(
'Modelfile should be *.mdl or *.xmile',
str(err.exception))
def test_non_valid_outputs(self):
from pysd.tools.benchmarking import load_outputs
with self.assertRaises(ValueError) as err:
load_outputs(
os.path.join(
_root,
"more-tests/not_vensim/test_not_vensim.txt"))
self.assertIn(
"Not able to read '",
str(err.exception))
self.assertIn(
"more-tests/not_vensim/test_not_vensim.txt'.",
str(err.exception))
def test_different_frames_error(self):
from pysd.tools.benchmarking import load_outputs, assert_frames_close
with self.assertRaises(AssertionError) as err:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")))
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(err.exception))
self.assertNotIn(
"Column 'Teacup Temperature' is not close.",
str(err.exception))
self.assertNotIn(
"Actual values:\n\t",
str(err.exception))
self.assertNotIn(
"Expected values:\n\t",
str(err.exception))
with self.assertRaises(AssertionError) as err:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
verbose=True)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(err.exception))
self.assertIn(
"Column 'Teacup Temperature' is not close.",
str(err.exception))
self.assertIn(
"Actual values:\n\t",
str(err.exception))
self.assertIn(
"Expected values:\n\t",
str(err.exception))
def test_different_frames_warning(self):
from warnings import catch_warnings
from pysd.tools.benchmarking import load_outputs, assert_frames_close
with catch_warnings(record=True) as ws:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(wu[0].message))
self.assertNotIn(
"Column 'Teacup Temperature' is not close.",
str(wu[0].message))
self.assertNotIn(
"Actual values:\n\t",
str(wu[0].message))
self.assertNotIn(
"Expected values:\n\t",
str(wu[0].message))
with catch_warnings(record=True) as ws:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
assertion="warn", verbose=True)
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(wu[0].message))
self.assertIn(
"Column 'Teacup Temperature' is not close.",
str(wu[0].message))
self.assertIn(
"Actual values:\n\t",
str(wu[0].message))
self.assertIn(
"Expected values:\n\t",
str(wu[0].message))
def test_transposed_frame(self):
from pysd.tools.benchmarking import load_outputs, assert_frames_close
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_transposed.csv"),
transpose=True))
def test_load_columns(self):
from pysd.tools.benchmarking import load_outputs
out0 = load_outputs(
os.path.join(_root, "data/out_teacup.csv"))
out1 = load_outputs(
os.path.join(_root, "data/out_teacup.csv"),
columns=["Room Temperature", "Teacup Temperature"])
out2 = load_outputs(
os.path.join(_root, "data/out_teacup_transposed.csv"),
transpose=True,
columns=["Heat Loss to Room"])
self.assertEqual(
set(out1.columns),
set(["Room Temperature", "Teacup Temperature"]))
self.assertEqual(
set(out2.columns),
set(["Heat Loss to Room"]))
self.assertTrue((out0.index == out1.index).all())
self.assertTrue((out0.index == out2.index).all())
def test_different_cols(self):
from warnings import catch_warnings
from pysd.tools.benchmarking import assert_frames_close
import pandas as pd
d1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'd': [6, 7]})
d2 = pd.DataFrame({'a': [1, 2]})
d3 = pd.DataFrame({'a': [1, 2], 'c': [3, 4]})
with self.assertRaises(ValueError) as err:
assert_frames_close(
actual=d1,
expected=d2)
self.assertIn(
"Columns from actual and expected values must be equal.",
str(err.exception))
with catch_warnings(record=True) as ws:
assert_frames_close(
actual=d1,
expected=d2,
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn("'b'", str(wu[0].message))
self.assertIn("'d'", str(wu[0].message))
self.assertIn(
"from actual values not found in expected values.",
str(wu[0].message))
with catch_warnings(record=True) as ws:
assert_frames_close(
expected=d1,
actual=d2,
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn("'b'", str(wu[0].message))
self.assertIn("'d'", str(wu[0].message))
self.assertIn(
"from expected values not found in actual values.",
str(wu[0].message))
with catch_warnings(record=True) as ws:
assert_frames_close(
actual=d1,
expected=d3,
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn("'b'", str(wu[0].message))
self.assertIn("'d'", str(wu[0].message))
self.assertIn(
"from actual values not found in expected values.",
str(wu[0].message))
self.assertIn(
"Columns 'c' from expected values not found in actual "
"values.", str(wu[0].message))
def test_invalid_input(self):
from pysd.tools.benchmarking import assert_frames_close
with self.assertRaises(TypeError) as err:
assert_frames_close(
actual=[1, 2],
expected=[1, 2])
self.assertIn(
"Inputs must both be pandas DataFrames.",
str(err.exception))
|
76090
|
from abc import abstractmethod, abstractproperty
import os
from pathlib import Path
import collections.abc
import logging
import pkg_resources
import uuid
from urllib.parse import urlparse
from typing import Set, List
import threading
import numpy as np
from frozendict import frozendict
import pyarrow as pa
import vaex
import vaex.execution
import vaex.settings
import vaex.utils
from vaex.array_types import data_type
from .column import Column, ColumnIndexed, supported_column_types
from . import array_types
from vaex import encoding
logger = logging.getLogger('vaex.dataset')
opener_classes = []
HASH_VERSION = "1"
HASH_VERSION_KEY = "version"
chunk_size_default = vaex.settings.main.chunk.size or 1024**2
_dataset_types = {}
lock = threading.Lock()
def register(cls, name=None):
    name = name or getattr(cls, 'snake_name', None) or cls.__name__
_dataset_types[name] = cls
return cls
@encoding.register('dataset')
class dataset_encoding:
@staticmethod
def encode(encoding, dataset):
return dataset.encode(encoding)
@staticmethod
def decode(encoding, dataset_spec):
dataset_spec = dataset_spec.copy()
type = dataset_spec.pop('dataset_type')
cls = _dataset_types[type]
return cls.decode(encoding, dataset_spec)
def open(path, fs_options={}, fs=None, *args, **kwargs):
failures = []
    with lock: # since we cache, make this thread safe
if not opener_classes:
for entry in pkg_resources.iter_entry_points(group='vaex.dataset.opener'):
logger.debug('trying opener: ' + entry.name)
try:
opener = entry.load()
opener_classes.append(opener)
except Exception as e:
logger.exception('issue loading ' + entry.name)
failures.append((e, entry))
# first the quick path
for opener in opener_classes:
if opener.quick_test(path, fs_options=fs_options, fs=fs):
if opener.can_open(path, fs_options=fs_options, fs=fs, *args, **kwargs):
return opener.open(path, fs_options=fs_options, fs=fs, *args, **kwargs)
# otherwise try all openers
for opener in opener_classes:
try:
if opener.can_open(path, fs_options=fs_options, fs=fs, *args, **kwargs):
return opener.open(path, fs_options=fs_options, fs=fs, *args, **kwargs)
except Exception as e:
failures.append((e, opener))
failures = "\n".join([f'\n-----{who}-----\n:' + vaex.utils.format_exception_trace(e) for e, who in failures])
if failures:
raise IOError(f'Cannot open {path}, failures: {failures}.')
else:
        raise IOError(f'Cannot open {path}; nobody knows how to read it.')
def _to_bytes(ar):
try:
return ar.view(np.uint8)
except ValueError:
return ar.copy().view(np.uint8)
def hash_combine(*hashes):
hasher = vaex.utils.create_hasher(large_data=False)
for hash in hashes:
hasher.update(hash.encode())
return hasher.hexdigest()
def hash_slice(hash, start, end):
hasher = vaex.utils.create_hasher(hash.encode(), large_data=False)
slice = np.array([start, end], dtype=np.int64)
hasher.update(_to_bytes(slice))
return hasher.hexdigest()
def hash_array_data(ar):
# this function should stay consistent with all future versions
# since this is the expensive part of the hashing
if isinstance(ar, np.ndarray):
ar = ar.ravel()
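        # object arrays cannot be hashed byte-wise; a random uuid yields a hash that never matches anything else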
if ar.dtype == np.object_:
return {"type": "numpy", "data": str(uuid.uuid4()), "mask": None}
if np.ma.isMaskedArray(ar):
data_byte_ar = _to_bytes(ar.data)
hasher = vaex.utils.create_hasher(data_byte_ar, large_data=True)
hash_data = {"type": "numpy", "data": hasher.hexdigest(), "mask": None}
if ar.mask is not True and ar.mask is not False and ar.mask is not np.True_ and ar.mask is not np.False_:
mask_byte_ar = _to_bytes(ar.mask)
hasher = vaex.utils.create_hasher(mask_byte_ar, large_data=True)
hash_data["mask"] = hasher.hexdigest()
return hash_data
else:
try:
byte_ar = _to_bytes(ar)
except ValueError:
byte_ar = ar.copy().view(np.uint8)
hasher = vaex.utils.create_hasher(byte_ar, large_data=True)
hash_data = {"type": "numpy", "data": hasher.hexdigest(), "mask": None}
elif isinstance(ar, (pa.Array, pa.ChunkedArray)):
hasher = vaex.utils.create_hasher(large_data=True)
buffer_hashes = []
hash_data = {"type": "arrow", "buffers": buffer_hashes}
if isinstance(ar, pa.ChunkedArray):
chunks = ar.chunks
else:
chunks = [ar]
for chunk in chunks:
for buffer in chunk.buffers():
if buffer is not None:
hasher.update(memoryview(buffer))
buffer_hashes.append(hasher.hexdigest())
else:
buffer_hashes.append(None)
elif isinstance(ar, vaex.column.Column):
hash_data = {"type": "column", "fingerprint": ar.fingerprint()}
else:
raise TypeError
return hash_data
def hash_array(ar, hash_info=None, return_info=False):
# this function can change over time, as it builds on top of the expensive part
# (hash_array_data), so we can cheaply calculate new hashes if we pass on hash_info
if hash_info is None:
hash_info = hash_array_data(ar)
if hash_info.get(HASH_VERSION_KEY) == HASH_VERSION: # TODO: semver check?
return hash_info['hash'], hash_info
if isinstance(ar, np.ndarray):
if ar.dtype == np.object_:
return hash_info['data'] # uuid, so always unique
if np.ma.isMaskedArray(ar):
if not (hash_info['type'] == 'numpy' and hash_info['data'] and hash_info['mask']):
hash_info = hash_array_data(ar)
else:
if not (hash_info['type'] == 'numpy' and hash_info['data']):
hash_info = hash_array_data(ar)
keys = [HASH_VERSION, hash_info['type'], hash_info['data']]
if hash_info['mask']:
keys.append(hash_info['mask'])
elif isinstance(ar, vaex.array_types.supported_arrow_array_types):
if not (hash_info['type'] == 'arrow' and hash_info['buffers']):
hash_info = hash_array_data(ar)
keys = [HASH_VERSION]
keys.extend(["NO_BUFFER" if not b else b for b in hash_info['buffers']])
elif isinstance(ar, vaex.column.Column):
if not (hash_info['type'] == 'column'):
hash_info = hash_array_data(ar)
keys = [HASH_VERSION]
keys.append(hash_info['fingerprint'])
hasher = vaex.utils.create_hasher(large_data=False) # small amounts of data
for key in keys:
hasher.update(key.encode('ascii'))
hash = hasher.hexdigest()
if return_info:
hash_info['hash'] = hash
hash_info[HASH_VERSION_KEY] = HASH_VERSION
return hash, hash_info
else:
return hash
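# Added illustration (not part of the original module): re-hashing with a cached
# hash_info should skip the expensive byte-level pass and reproduce the same hash.
def _hash_array_example():
    ar = np.arange(10)
    hash1, info = hash_array(ar, return_info=True)
    hash2, _ = hash_array(ar, hash_info=info, return_info=True)
    assert hash1 == hash2
    return hash1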
def to_supported_array(ar):
if not isinstance(ar, supported_column_types):
ar = np.asanyarray(ar)
if isinstance(ar, np.ndarray) and ar.dtype.kind == 'U':
ar = vaex.column.ColumnArrowLazyCast(ar, pa.string())
elif isinstance(ar, np.ndarray) and ar.dtype.kind == 'O':
ar_data = ar
if np.ma.isMaskedArray(ar):
ar_data = ar.data
        try:
            # "k != k" is a way to detect NaN's and NaT's
            types = list({type(k) for k in ar_data if k is not None and k == k})
        except ValueError:
            # If there is an array value in the column, Numpy throws a ValueError
            # "The truth value of an array with more than one element is ambiguous".
            # We don't handle this by default as it is a bit slower.
            def is_missing(k):
                if k is None:
                    return True
                try:
                    # a way to detect NaN's and NaT
                    return not (k == k)
                except ValueError:
                    # if a value is an array, this will fail, and it is non-missing
                    return False
            types = list({type(k) for k in ar_data if not is_missing(k)})
        if len(types) == 1 and issubclass(types[0], str):
            # TODO: how do we know it should not be large_string?
            # self._dtypes_override[valid_name] = pa.string()
            ar = vaex.column.ColumnArrowLazyCast(ar, pa.string())
        if len(types) == 0:  # can only be if all nan right?
            ar = ar.astype(np.float64)
return ar
def _concat_chunk_list(list_of_chunks):
dict_of_list_of_arrays = collections.defaultdict(list)
for chunks in list_of_chunks:
for name, array in chunks.items():
if isinstance(array, pa.ChunkedArray):
dict_of_list_of_arrays[name].extend(array.chunks)
else:
dict_of_list_of_arrays[name].append(array)
chunks = {name: vaex.array_types.concat(arrays) for name, arrays in dict_of_list_of_arrays.items()}
return chunks
def _slice_of_chunks(chunks_ready_list, chunk_size):
current_row_count = 0
chunks_current_list = []
while current_row_count < chunk_size and chunks_ready_list:
chunks_current = chunks_ready_list.pop(0)
chunk = list(chunks_current.values())[0]
# chunks too large, split, and put back a part
if current_row_count + len(chunk) > chunk_size:
strict = True
if strict:
needed_length = chunk_size - current_row_count
current_row_count += needed_length
assert current_row_count == chunk_size
chunks_head = {name: vaex.array_types.slice(chunk, 0, needed_length) for name, chunk in chunks_current.items()}
chunks_current_list.append(chunks_head)
chunks_extra = {name: vaex.array_types.slice(chunk, needed_length) for name, chunk in chunks_current.items()}
chunks_ready_list.insert(0, chunks_extra) # put back the extra in front
else:
current_row_count += len(chunk)
chunks_current_list.append(chunks_current)
else:
current_row_count += len(chunk)
chunks_current_list.append(chunks_current)
return chunks_current_list, current_row_count
def chunk_rechunk(chunk_iter, chunk_size):
chunks_ready_list = []
i1 = i2 = 0
for _, _, chunks in chunk_iter:
chunks_ready_list.append(chunks)
total_row_count = sum([len(list(k.values())[0]) for k in chunks_ready_list])
if total_row_count > chunk_size:
chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
i2 += current_row_count
chunks = vaex.dataset._concat_chunk_list(chunks_current_list)
yield i1, i2, chunks
i1 = i2
while chunks_ready_list:
chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
i2 += current_row_count
chunks = vaex.dataset._concat_chunk_list(chunks_current_list)
yield i1, i2, chunks
i1 = i2
def _rechunk(chunk_iter, chunk_size):
def wrapper():
i1 = i2 = 0
for chunks in chunk_iter:
i2 += len(list(chunks.values())[0])
yield i1, i2, chunks
i1 = i2
yield from chunk_rechunk(wrapper(), chunk_size)
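# Added illustration (not part of the original module): _rechunk turns an iterator
# of unevenly sized chunk dicts into fixed-size chunks, assuming vaex.array_types
# slice/concat behave as they are used elsewhere in this file.
def _rechunk_example():
    chunks_in = iter([{'x': np.arange(0, 3)}, {'x': np.arange(3, 8)}])
    pieces = [(i1, i2, len(chunks['x'])) for i1, i2, chunks in _rechunk(chunks_in, chunk_size=4)]
    assert pieces == [(0, 4, 4), (4, 8, 4)]
    return pieces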
def empty_chunk_iterator(start, end, chunk_size):
length = end - start
i1 = 0
i2 = min(length, i1 + chunk_size)
while i1 < length:
yield i1, i2, {}
i1 = i2
i2 = min(length, i1 + chunk_size)
class Dataset(collections.abc.Mapping):
def __init__(self):
super().__init__()
self._columns = frozendict()
self._row_count = None
self._id = str(uuid.uuid4())
self._cached_fingerprint = None
def __repr__(self):
import yaml
data = self.__repr_data__()
return yaml.dump(data, sort_keys=False, indent=4)
def __repr_data__(self):
state = self.__getstate__()
def normalize(v):
if isinstance(v, Dataset):
return v.__repr_data__()
if isinstance(v, frozendict):
return dict(v)
if isinstance(v, vaex.dataframe.DataFrame):
return {'type': 'dataframe', 'repr': repr(v)}
if isinstance(v, np.ndarray):
return v.tolist()
return v
return {'type': self.snake_name, **{k: normalize(v) for k, v in state.items() if not k.startswith('_')}}
@property
def id(self):
'''id that uniquely identifies a dataset at runtime'''
return self.fingerprint
@property
def fingerprint(self):
        '''id that uniquely identifies a dataset across runtimes; might be more expensive and require hashing'''
if self._cached_fingerprint is None:
self._cached_fingerprint = self._fingerprint
return self._cached_fingerprint
@abstractproperty
def _fingerprint(self):
pass
def encode(self, encoding):
if not encoding.has_object_spec(self.id):
spec = self._encode(encoding)
encoding.set_object_spec(self.id, spec)
return {'dataset_type': self.snake_name, 'object-id': self.id}
@classmethod
def decode(cls, encoding, spec):
id = spec['object-id']
if not encoding.has_object(id):
spec = encoding.get_object_spec(id)
ds = cls._decode(encoding, spec)
encoding.set_object(id, ds)
return encoding.get_object(id)
@abstractmethod
def _create_columns(self):
pass
@property
def name(self):
# TODO: in the future, we might want to use self.fingerprint or self.id
return "no-name"
def __getstate__(self):
state = self.__dict__.copy()
del state['_columns']
del state['_cached_fingerprint']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._cached_fingerprint = None
self._create_columns()
def schema(self, array_type=None):
return {name: vaex.array_types.data_type(col) for name, col in self.items()}
def shapes(self):
return {name: self.shape(name) for name, col in self.items()}
def _set_row_count(self):
if not self._columns:
return
values = list(self._columns.values())
self._row_count = len(values[0])
for name, value in list(self._columns.items())[1:]:
if len(value) != self._row_count:
                raise ValueError(f'First column has length {self._row_count}, while column {name} has length {len(value)}')
@property
def row_count(self):
return self._row_count
def project(self, *names):
all = set(self)
drop = all - set(names)
# we want a deterministic order for fingerprints
drop = list(drop)
drop.sort()
return self.dropped(*list(drop))
def concat(self, *others, resolver='flexible'):
datasets = []
if isinstance(self, DatasetConcatenated):
datasets.extend(self.datasets)
else:
datasets.extend([self])
for other in others:
if isinstance(other, DatasetConcatenated):
datasets.extend(other.datasets)
else:
datasets.extend([other])
return DatasetConcatenated(datasets, resolver=resolver)
def take(self, indices, masked=False):
return DatasetTake(self, indices, masked=masked)
def renamed(self, renaming):
return DatasetRenamed(self, renaming)
def merged(self, rhs):
return DatasetMerged(self, rhs)
def dropped(self, *names):
return DatasetDropped(self, names)
def __getitem__(self, item):
if isinstance(item, slice):
assert item.step in [1, None]
return self.slice(item.start or 0, item.stop or self.row_count)
return self._columns[item]
def __len__(self):
return len(self._columns)
def __iter__(self):
return iter(self._columns)
def get_data(self, i1, i2, names):
raise NotImplementedError
def __eq__(self, rhs):
if not isinstance(rhs, Dataset):
return NotImplemented
# simple case, if fingerprints are equal, the data is equal
if self.fingerprint == rhs.fingerprint:
return True
        # but not the other way around
keys = set(self)
keys_hashed = set(self._ids)
missing = keys ^ keys_hashed
if missing:
return self.fingerprint == rhs.fingerprint
keys = set(rhs)
keys_hashed = set(rhs._ids)
missing = keys ^ keys_hashed
if missing:
return self.fingerprint == rhs.fingerprint
return self._ids == rhs._ids
def __hash__(self):
keys = set(self)
keys_hashed = set(self._ids)
missing = keys ^ keys_hashed
if missing:
# if we don't have hashes for all columns, we just use the fingerprint
return hash(self.fingerprint)
return hash(tuple(self._ids.items()))
def _default_lazy_chunk_iterator(self, array_map, columns, chunk_size, reverse=False):
chunk_size = chunk_size or 1024**2
chunk_count = (self.row_count + chunk_size - 1) // chunk_size
chunks = range(chunk_count)
if reverse:
chunks = reversed(chunks)
for i in chunks:
i1 = i * chunk_size
i2 = min((i + 1) * chunk_size, self.row_count)
def reader(i1=i1, i2=i2):
chunks = {k: array_map[k][i1:i2] for k in columns}
length = i2 - i1
for name, chunk in chunks.items():
                assert len(chunk) == length, f'Oops, got a chunk ({name}) of length {len(chunk)} while it is expected to be of length {length} (at {i1}-{i2})'
return chunks
yield i1, i2, reader
def _default_chunk_iterator(self, array_map, columns, chunk_size, reverse=False):
for i1, i2, reader in self._default_lazy_chunk_iterator(array_map, columns, chunk_size, reverse):
yield i1, i2, reader()
@abstractmethod
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
pass
@abstractmethod
def is_masked(self, column):
pass
@abstractmethod
def shape(self, column):
pass
@abstractmethod
def close(self):
'''Close file handles or other resources, the DataFrame will not be in a usable state afterwards.'''
pass
@abstractmethod
def slice(self, start, end):
pass
@abstractmethod
def hashed(self):
pass
@abstractmethod
def leafs(self) -> List["Dataset"]:
pass
class DatasetDecorator(Dataset):
def __init__(self, original):
super().__init__()
self.original = original
def leafs(self) -> List[Dataset]:
return self.original.leafs()
def close(self):
self.original.close()
def is_masked(self, column):
return self.original.is_masked(column)
def shape(self, column):
return self.original.shape(column)
class ColumnProxy(vaex.column.Column):
'''To give the Dataset._columns object useful containers for debugging'''
ds: Dataset
def __init__(self, ds, name, type):
self.ds = ds
self.name = name
self.dtype = type
def _fingerprint(self):
fp = vaex.cache.fingerprint(self.ds.fingerprint, self.name)
return f'column-proxy-{fp}'
def __len__(self):
return self.ds.row_count
def to_numpy(self):
values = self[:]
return np.array(values)
def __getitem__(self, item):
if isinstance(item, slice):
array_chunks = []
ds = self.ds.__getitem__(item)
for chunk_start, chunk_end, chunks in ds.chunk_iterator([self.name]):
ar = chunks[self.name]
if isinstance(ar, pa.ChunkedArray):
array_chunks.extend(ar.chunks)
else:
array_chunks.append(ar)
if len(array_chunks) == 1:
return array_chunks[0]
if len(array_chunks) == 0:
return vaex.dtype(self.dtype).create_array([])
return vaex.array_types.concat(array_chunks)
else:
raise NotImplementedError
@register
class DatasetRenamed(DatasetDecorator):
snake_name = 'rename'
def __init__(self, original, renaming):
super().__init__(original)
self.renaming = renaming
self.reverse = {v: k for k, v in renaming.items()}
self._create_columns()
self._ids = frozendict({renaming.get(name, name): ar for name, ar in original._ids.items()})
self._set_row_count()
def renamed(self, renaming):
        # # {'a': 'x', 'b': 'y'} and {'x': 'a', 'b': 'z', 'c': 'q'} -> {'b': 'z', 'c': 'q'}
resulting = {}
renaming = renaming.copy() # we'll modify in place
for old, new in self.renaming.items():
if new in renaming:
if old == renaming[new]:
pass # e.g. x->a->x
else:
resulting[old] = renaming[new]
del renaming[new] # we already covered this
else:
# e.g. x->a->a
resulting[old] = new
# e.g. x->x->a
resulting.update(renaming)
return DatasetRenamed(self.original, resulting)
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self.renaming)
return f'dataset-{self.snake_name}-{self.original.fingerprint}'
def _create_columns(self):
self._columns = frozendict({self.renaming.get(name, name): ar for name, ar in self.original.items()})
def _encode(self, encoding):
dataset_spec = encoding.encode('dataset', self.original)
return {'renaming': dict(self.renaming), 'dataset': dataset_spec}
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
return cls(dataset, spec['renaming'])
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
for name in columns:
if name in self.renaming:
rename = self.renaming[name]
raise KeyError(f'Oops, you tried to get column {name}, but you renamed it to {rename}')
columns = [self.reverse.get(name, name) for name in columns]
for i1, i2, chunks in self.original.chunk_iterator(columns, chunk_size, reverse=reverse):
yield i1, i2, {self.renaming.get(name, name): ar for name, ar in chunks.items()}
def is_masked(self, column):
return self.original.is_masked(self.reverse.get(column, column))
def shape(self, column):
return self.original.shape(self.reverse.get(column, column))
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return type(self)(self.original.slice(start, end), self.renaming)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.renaming)
@register
class DatasetConcatenated(Dataset):
snake_name = "concat"
def __init__(self, datasets, resolver):
super().__init__()
self.datasets = datasets
self.resolver = resolver
if self.resolver == 'strict':
for dataset in datasets[1:]:
if set(dataset) != set(datasets[0]):
l = set(dataset)
r = set(datasets[0])
diff = l ^ r
raise NameError(f'Concatenating datasets with different names: {l} and {r} (difference: {diff})')
self._schema = datasets[0].schema()
self._shapes = datasets[0].shapes()
for dataset in datasets[1:]:
if dataset.shapes() != self._shapes:
raise ValueError(f'Cannot concatenate with different shapes: {self._shapes} != {dataset.shapes()}')
for dataset in datasets[1:]:
schema = dataset.schema()
if dataset.schema() != self._schema:
raise ValueError(f'Cannot concatenate with different schemas: {self._shapes} != {dataset.shapes()}')
elif self.resolver == 'flexible':
schemas = [ds.schema() for ds in datasets]
shapes = [ds.shapes() for ds in datasets]
# try to keep the order of the original dataset
schema_list_map = {}
for schema in schemas:
for name, type in schema.items():
if name not in schema_list_map:
schema_list_map[name] = []
for name, type_list in schema_list_map.items():
for schema in schemas:
                    # None means the column is missing
type_list.append(schema.get(name))
from .schema import resolver_flexible
# shapes
shape_list_map = {}
for shape in shapes:
for name, type in shape.items():
if name not in shape_list_map:
shape_list_map[name] = []
for name, shape_list in shape_list_map.items():
for shapes_ in shapes:
                    # None means the column is missing
shape_list.append(shapes_.get(name))
self._schema = {}
self._shapes = {}
for name in shape_list_map:
self._schema[name], self._shapes[name] = resolver_flexible.resolve(schema_list_map[name], shape_list_map[name])
else:
raise ValueError(f'Invalid resolver {resolver}, choose between "strict" or "flexible"')
self._create_columns()
self._set_row_count()
@property
def _fingerprint(self):
ids = [ds.fingerprint for ds in self.datasets]
id = vaex.cache.fingerprint(*ids)
return f'dataset-{self.snake_name}-{id}'
def _create_columns(self):
columns = {}
hashes = {}
for name in self._schema:
columns[name] = ColumnProxy(self, name, self._schema[name])
if all(name in ds._ids for ds in self.datasets):
hashes[name] = hash_combine(*[ds._ids[name] for ds in self.datasets])
self._columns = frozendict(columns)
self._ids = frozendict(hashes)
def _encode(self, encoding, skip=set()):
datasets = encoding.encode_list('dataset', self.datasets)
spec = {'dataset_type': self.snake_name, 'datasets': datasets, 'resolver': self.resolver}
return spec
@classmethod
def _decode(cls, encoding, spec):
datasets = encoding.decode_list('dataset', spec['datasets'])
ds = cls(datasets, spec['resolver'])
return ds
def is_masked(self, column):
for dataset in self.datasets:
if column not in dataset:
return True
return any(k.is_masked(column) for k in self.datasets)
def shape(self, column):
return self._shapes[column]
def _set_row_count(self):
self._row_count = sum(ds.row_count for ds in self.datasets)
def schema(self, array_type=None):
return self._schema.copy()
def _chunk_iterator_non_strict(self, columns, chunk_size=None, reverse=False, start=0, end=None):
end = self.row_count if end is None else end
offset = 0
for dataset in self.datasets:
present = [k for k in columns if k in dataset]
# skip over whole datasets
if start >= offset + dataset.row_count:
offset += dataset.row_count
continue
# we are past the end
if end <= offset:
break
for i1, i2, chunks in dataset.chunk_iterator(present, chunk_size=chunk_size, reverse=reverse):
# chunks = {name: vaex.array_types.to_arrow(ar) for name, ar in chunks.items()}
length = i2 - i1
chunk_start = offset
chunk_end = offset + length
if start >= chunk_end: # we didn't find the beginning yet
offset += length
continue
if end <= chunk_start: # we are past the end
# assert False
break
if start > chunk_start:
# this means we have to cut off a piece of the beginning
if end < chunk_end:
# AND the end
length = end - chunk_start # without the start cut off
length -= start - chunk_start # correcting for the start cut off
assert length > 0
chunks = {name: vaex.array_types.slice(ar, start - chunk_start, length) for name, ar in chunks.items()}
for name, ar in chunks.items():
assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
else:
length -= start - chunk_start # correcting for the start cut off
assert length > 0
chunks = {name: vaex.array_types.slice(ar, start - chunk_start) for name, ar in chunks.items()}
for name, ar in chunks.items():
assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
else:
if end < chunk_end:
# we only need to cut off a piece of the end
length = end - chunk_start
assert length > 0
chunks = {name: vaex.array_types.slice(ar, 0, length) for name, ar in chunks.items()}
for name, ar in chunks.items():
assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
from .schema import resolver_flexible
allchunks = {name: resolver_flexible.align(length, chunks.get(name), self._schema[name], self._shapes[name]) for name in columns}
yield {k: allchunks[k] for k in columns}
offset += (i2 - i1)
def chunk_iterator(self, columns, chunk_size=None, reverse=False, start=0, end=None):
chunk_size = chunk_size or 1024*1024
        i1 = i2 = 0
if not columns:
end = self.row_count if end is None else end
yield from empty_chunk_iterator(start, end, chunk_size)
else:
chunk_iterator = self._chunk_iterator_non_strict(columns, chunk_size, reverse=reverse, start=start, end=self.row_count if end is None else end)
yield from _rechunk(chunk_iterator, chunk_size)
def close(self):
for ds in self.datasets:
ds.close()
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
# TODO: we can be smarter here, and trim off some datasets
return DatasetSliced(self, start=start, end=end)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)([dataset.hashed() for dataset in self.datasets], resolver=self.resolver)
def leafs(self) -> List[Dataset]:
return [self]
# def leafs(self) -> List[Dataset]:
# leafs = list()
# for ds in self.datasets:
# leafs.extend(ds.leafs())
# return leafs
@register
class DatasetTake(DatasetDecorator):
snake_name = "take"
def __init__(self, original, indices, masked):
super().__init__(original)
self.indices = indices
self.masked = masked
self._lazy_hash_index = None
self._create_columns()
self._set_row_count()
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self._hash_index, self.masked)
return f'dataset-{self.snake_name}-{id}'
@property
def _hash_index(self):
if self._lazy_hash_index is None:
self._lazy_hash_index = hash_array(self.indices)
return self._lazy_hash_index
def _create_columns(self):
# if the columns in ds already have a ColumnIndex
# we could do, direct_indices = df.column['bla'].indices[indices]
# which should be shared among multiple ColumnIndex'es, so we store
# them in this dict
direct_indices_map = {}
columns = {}
hashes = {}
for name, column in self.original.items():
columns[name] = ColumnIndexed.index(column, self.indices, direct_indices_map, masked=self.masked)
if name in self.original._ids:
hashes[name] = hash_combine(self._hash_index, self.original._ids[name])
self._columns = frozendict(columns)
self._ids = frozendict(hashes)
def _encode(self, encoding, skip=set()):
dataset_spec = encoding.encode('dataset', self.original)
spec = {'dataset_type': self.snake_name, 'dataset': dataset_spec}
spec['indices'] = encoding.encode('array', self.indices)
spec['masked'] = self.masked
return spec
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
indices = encoding.decode('array', spec['indices'])
ds = cls(dataset, indices, spec['masked'])
return ds
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
# TODO: we may be able to do this slightly more efficient by first
# materializing the columns
yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return DatasetSlicedArrays(self, start=start, end=end)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.indices, self.masked)
@register
class DatasetFiltered(DatasetDecorator):
snake_name = 'filter'
def __init__(self, original, filter, expected_length=None, state=None, selection=None):
super().__init__(original)
self._filter = filter
self._lazy_hash_filter = None
self._create_columns()
self._row_count = np.sum(self._filter).item()
self.state = state
self.selection = selection
if expected_length is not None:
if expected_length != self._row_count:
raise ValueError(f'Expected filter to have {expected_length} true values, but counted {self._row_count}')
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.id, self._hash_index, self.state, self.selection)
return f'dataset-{self.snake_name}-{id}'
@property
def _hash_index(self):
if self._lazy_hash_filter is None:
self._lazy_hash_filter = hash_array(self._filter)
return self._lazy_hash_filter
def _create_columns(self):
columns = {name: vaex.dataset.ColumnProxy(self, name, data_type(col)) for name, col in self.original._columns.items()}
hashes = {}
for name, column in self.original.items():
if name in self.original._ids:
hashes[name] = hash_combine(self._hash_index, self.original._ids[name])
self._columns = frozendict(columns)
self._ids = frozendict(hashes)
def _encode(self, encoding, skip=set()):
dataset_spec = encoding.encode('dataset', self.original)
spec = {'dataset': dataset_spec}
if self.state is not None and self.selection is not None:
spec['state'] = encoding.encode('dataframe-state', self.state)
spec['selection'] = encoding.encode('selection', self.selection)
spec['filter_array'] = encoding.encode('array', self._filter)
return spec
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
if 'filter_array' in spec:
filter = encoding.decode('array', spec['filter_array'])
ds = cls(dataset, filter)
else:
state = encoding.decode('dataframe-state', spec['state'])
selection = encoding.decode('selection', spec['selection'])
df = vaex.from_dataset(dataset)
df.state_set(state)
df.set_selection(vaex.dataframe.FILTER_SELECTION_NAME, selection)
df._push_down_filter()
filter = df.dataset.filter
ds = cls(dataset, filter, state=state, selection=selection)
return ds
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
chunk_size = chunk_size or 1024**2
if not columns:
end = self.row_count
length = end
i1 = i2 = 0
i2 = min(length, i1 + chunk_size)
while i1 < length:
yield i1, i2, {}
i1 = i2
i2 = min(length, i1 + chunk_size)
return
def filtered_chunks():
for i1, i2, chunks in self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse):
chunks_filtered = {name: vaex.array_types.filter(ar, self._filter[i1:i2]) for name, ar in chunks.items()}
yield chunks_filtered
yield from _rechunk(filtered_chunks(), chunk_size)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self._filter)
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
expected_length = end - start
mask = vaex.superutils.Mask(memoryview(self._filter))
start, end = mask.indices(start, end-1)
end += 1
filter = self._filter[start:end]
assert filter.sum() == expected_length
return type(self)(self.original.slice(start, end), filter)
@register
class DatasetSliced(DatasetDecorator):
snake_name = "slice"
def __init__(self, original, start, end):
super().__init__(original)
self.start = start
self.end = end
self._row_count = end - start
self._create_columns()
# self._ids = {}
self._ids = frozendict({name: hash_slice(hash, start, end) for name, hash in original._ids.items()})
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self.start, self.end)
return f'dataset-{self.snake_name}-{id}'
def leafs(self) -> List[Dataset]:
# we don't want to propagate slicing
return [self]
def _encode(self, encoding, skip=set()):
dataset_spec = encoding.encode('dataset', self.original)
return {'dataset': dataset_spec, 'start': self.start, 'end': self.end}
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
return cls(dataset, spec['start'], spec['end'])
def _create_columns(self):
self._columns = {name: vaex.dataset.ColumnProxy(self, name, data_type(col)) for name, col in self.original._columns.items()}
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
yield from self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse, start=self.start, end=self.end)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.start, self.end)
def slice(self, start, end):
length = end - start
start += self.start
end = start + length
if end > self.original.row_count:
            raise IndexError(f'Slice end ({end}) is larger than number of rows: {self.original.row_count}')
return type(self)(self.original, start, end)
@register
class DatasetSlicedArrays(DatasetDecorator):
snake_name = 'slice_arrays'
def __init__(self, original, start, end):
super().__init__(original)
# maybe we want to avoid slicing twice, and collapse it to 1?
self.start = start
self.end = end
# TODO: this is the old dataframe.trim method, we somehow need to test/capture that
# if isinstance(column, array_types.supported_array_types): # real array
# df.columns[name] = column[self._index_start:self._index_end]
# else:
# df.columns[name] = column.trim(self._index_start, self._index_end)
self._create_columns()
self._ids = frozendict({name: hash_slice(hash, start, end) for name, hash in original._ids.items()})
self._set_row_count()
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self.start, self.end)
return f'dataset-{self.snake_name}-{id}'
def leafs(self) -> List[Dataset]:
# we don't want to propagate slicing
return [self]
def _create_columns(self):
columns = {}
for name, column in self.original.items():
if isinstance(column, array_types.supported_array_types): # real array
column = column[self.start:self.end]
else:
column = column.trim(self.start, self.end)
columns[name] = column
self._columns = frozendict(columns)
def _encode(self, encoding, skip=set()):
dataset_spec = encoding.encode('dataset', self.original)
return {'dataset': dataset_spec, 'start': self.start, 'end': self.end}
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
return cls(dataset, spec['start'], spec['end'])
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.start, self.end)
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
length = end - start
start += self.start
end = start + length
if end > self.original.row_count:
            raise IndexError(f'Slice end ({end}) is larger than number of rows: {self.original.row_count}')
return type(self)(self.original, start, end)
@register
class DatasetDropped(DatasetDecorator):
snake_name = "drop"
def __init__(self, original, names):
super().__init__(original)
self._dropped_names = tuple(names)
self._create_columns()
self._ids = frozendict({name: ar for name, ar in original._ids.items() if name not in names})
self._set_row_count()
def dropped(self, *names):
return DatasetDropped(self.original, self._dropped_names + names)
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self._dropped_names)
return f'dataset-{self.snake_name}-{id}'
def _create_columns(self):
self._columns = frozendict({name: ar for name, ar in self.original.items() if name not in self._dropped_names})
def _encode(self, encoding):
dataset_spec = encoding.encode('dataset', self.original)
return {'dataset': dataset_spec, 'names': list(self._dropped_names)}
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
ds = cls(dataset, spec['names'])
return ds
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
for column in columns:
if column in self._dropped_names:
                raise KeyError(f'Oops, you tried to get column {column}, but it was dropped')
yield from self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self._dropped_names)
def close(self):
self.original.close()
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return type(self)(self.original.slice(start, end), self._dropped_names)
@register
class DatasetMerged(Dataset):
snake_name = "merge"
def __init__(self, left, right):
super().__init__()
self.left = left
self.right = right
if self.left.row_count != self.right.row_count:
raise ValueError(f'Merging datasets with unequal row counts ({self.left.row_count} != {self.right.row_count})')
self._row_count = self.left.row_count
overlap = set(left) & set(right)
if overlap:
raise NameError(f'Duplicate names: {overlap}')
self._create_columns()
self._ids = frozendict({**left._ids, **right._ids})
self._set_row_count()
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.left.fingerprint, self.right.fingerprint)
return f'dataset-{self.snake_name}-{id}'
def leafs(self) -> List[Dataset]:
return self.left.leafs() + self.right.leafs()
def _create_columns(self):
# TODO: for DatasetArray, we might want to just do this?
# self._columns = frozendict({**left._columns, **right._columns})
self._columns = {**{name: ColumnProxy(self.left, name, data_type(col)) for name, col in self.left._columns.items()},
**{name: ColumnProxy(self.right, name, data_type(col)) for name, col in self.right._columns.items()}}
def _encode(self, encoding, skip=set()):
dataset_spec_left = encoding.encode('dataset', self.left)
dataset_spec_right = encoding.encode('dataset', self.right)
spec = {'left': dataset_spec_left, 'right': dataset_spec_right}
return spec
@classmethod
def _decode(cls, encoding, spec):
left = encoding.decode('dataset', spec['left'])
right = encoding.decode('dataset', spec['right'])
ds = cls(left, right)
return ds
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
columns_left = [k for k in columns if k in self.left]
columns_right = [k for k in columns if k in self.right]
if not columns_left:
yield from self.right.chunk_iterator(columns, chunk_size, reverse=reverse)
elif not columns_right:
yield from self.left.chunk_iterator(columns, chunk_size, reverse=reverse)
else:
for (i1, i2, ichunks), (j1, j2, jchunks) in zip(
self.left.chunk_iterator(columns_left, chunk_size, reverse=reverse),
self.right.chunk_iterator(columns_right, chunk_size, reverse=reverse)):
# TODO: if one of the datasets does not respect the chunk_size (e.g. parquet)
# this might fail
assert i1 == j1
assert i2 == j2
yield i1, i2, {**ichunks, **jchunks}
def is_masked(self, column):
if column in self.left:
return self.left.is_masked(column)
else:
return self.right.is_masked(column)
def shape(self, column):
if column in self.left:
return self.left.shape(column)
else:
return self.right.shape(column)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.left.hashed(), self.right.hashed())
def close(self):
self.left.close()
self.right.close()
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return type(self)(self.left.slice(start, end), self.right.slice(start, end))
@register
class DatasetArrays(Dataset):
snake_name = "arrays"
def __init__(self, mapping=None, hashed=True, **kwargs):
super().__init__()
if mapping is None:
mapping = {}
columns = {**mapping, **kwargs}
columns = {key: to_supported_array(ar) for key, ar in columns.items()}
# TODO: we finally want to get rid of datasets with no columns
self._columns = frozendict(columns)
if hashed:
self._ids = frozendict({key: hash_array(array) for key, array in self._columns.items()})
else:
self._ids = frozendict()
self._set_row_count()
@property
def id(self):
try:
# requires hashing and is expensive
return self.fingerprint
except ValueError:
return f'dataset-{self.snake_name}-uuid4-{self._id}'
@property
def _fingerprint(self):
keys = set(self)
keys_hashed = set(self._ids)
missing = keys ^ keys_hashed
if missing:
# if we don't have hashes for all columns, we do it like id
return f'dataset-{self.snake_name}-uuid4-{self._id}'
# self.__hash__() # invoke just to check we don't have missing hashes
# but Python's hash functions are not deterministic (cross processs)
fp = vaex.cache.fingerprint(tuple(self._ids.items()))
return f'dataset-{self.snake_name}-hashed-{fp}'
def leafs(self) -> List[Dataset]:
return [self]
def _encode(self, encoding):
arrays = encoding.encode_dict('array', self._columns)
spec = {'dataset_type': self.snake_name, 'arrays': arrays}
if self._ids:
fingerprints = dict(self._ids)
spec['fingerprints'] = fingerprints
return spec
@classmethod
def _decode(cls, encoding, spec):
arrays = encoding.decode_dict('array', spec['arrays'])
ds = cls(arrays)
if 'fingerprints' in spec:
ds._ids = frozendict(spec['fingerprints'])
return ds
def __getstate__(self):
state = self.__dict__.copy()
# here, we actually DO want to keep the columns
# del state['_columns']
return state
def __setstate__(self, state):
super().__setstate__(state)
def _create_columns(self):
pass
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)
def is_masked(self, column):
ar = self._columns[column]
if not isinstance(ar, np.ndarray):
ar = ar[0:1] # take a small piece
if isinstance(ar, np.ndarray):
return np.ma.isMaskedArray(ar)
else:
return False # an arrow array always has null value options
def shape(self, column):
ar = self._columns[column]
if not isinstance(ar, np.ndarray):
ar = ar[0:1] # take a small piece
if isinstance(ar, vaex.array_types.supported_arrow_array_types):
return tuple()
else:
return ar.shape[1:]
def merged(self, rhs):
        # TODO: if we don't allow empty datasets, we can remove this method
if len(self) == 0:
return rhs
if len(rhs) == 0:
return self
# TODO: this is where we want to check if both are array like
# and have faster version of merged
return DatasetMerged(self, rhs)
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return DatasetSlicedArrays(self, start=start, end=end)
def hashed(self):
if set(self._ids) == set(self):
return self
new = type(self)(self._columns)
new._ids = frozendict({key: hash_array(array) for key, array in new._columns.items()})
return new
def close(self):
pass # nothing to do, maybe drop a refcount?
# TODO: we might want to really get rid of these, since we want to avoid copying them over the network?
# def dropped(self, names):
class DatasetFile(Dataset):
"""Datasets that map to a file can keep their ids/hashes in the file itself,
or keep them in a meta file.
"""
def __init__(self, path, write=False, fs_options={}, fs=None):
super().__init__()
self.path = path
self.fs_options = fs_options
self.fs = fs
self.write = write
self._columns = {}
self._ids = {}
self._frozen = False
self._hash_calculations = 0 # track it for testing purposes
self._hash_info = {}
self._hash_cache_needs_write = False
self._read_hashes()
@property
def name(self):
base, ext, fs_options = vaex.file.split_ext(self.path)
base = os.path.basename(base)
return base
@property
def _fingerprint(self):
if set(self._ids) == set(self):
fingerprint = vaex.cache.fingerprint(dict(self._ids))
return f'dataset-{self.snake_name}-hashed-{fingerprint}'
else:
# TODO: if the dataset is hashed, return a fingerprint based on that
fingerprint = vaex.file.fingerprint(self.path, fs_options=self.fs_options, fs=self.fs)
return f'dataset-{self.snake_name}-{fingerprint}'
def leafs(self) -> List[Dataset]:
return [self]
def _create_columns(self):
pass
@classmethod
def quick_test(cls, path, fs_options={}, fs=None, *args, **kwargs):
return False
@classmethod
def open(cls, path, *args, **kwargs):
return cls(path, *args, **kwargs)
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)
def is_masked(self, column):
ar = self._columns[column]
if not isinstance(ar, np.ndarray):
ar = ar[0:1] # take a small piece
if isinstance(ar, np.ndarray):
return np.ma.isMaskedArray(ar)
else:
return False # an arrow array always has null value options
def shape(self, column):
ar = self._columns[column]
if not isinstance(ar, np.ndarray):
ar = ar[0:1] # take a small piece
if isinstance(ar, vaex.array_types.supported_arrow_array_types):
return tuple()
else:
return ar.shape[1:]
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return DatasetSlicedArrays(self, start=start, end=end)
def _read_hashes(self):
path_hashes = Path(self.path + '.d') / 'hashes.yaml'
try:
exists = path_hashes.exists()
except OSError: # happens for windows py<38
exists = False
if exists:
with path_hashes.open() as f:
hashes = vaex.utils.yaml_load(f)
if hashes is None:
raise ValueError(f'{path_hashes} was probably truncated due to another process writing.')
self._hash_info = hashes.get('columns', {})
def _freeze(self):
self._ids = frozendict(self._ids)
self._columns = frozendict(self._columns)
self._set_row_count()
self._frozen = True
if self._hash_cache_needs_write:
self._write_hash_info()
def encode(self, encoding, skip=set()):
spec = {'dataset_type': self.snake_name,
'write': self.write,
'path': self.path,
'fs_options': self.fs_options,
'fs': self.fs}
return spec
def __getstate__(self):
# we don't have the columns in the state, since we should be able
# to get them from disk again
return {
'write': self.write,
'path': self.path,
'fs_options': self.fs_options,
'fs': self.fs,
            '_ids': dict(self._ids)  # serialize the hashes as a non-frozen dict
}
def __setstate__(self, state):
super().__setstate__(state)
# 'ctor' like initialization
self._frozen = False
self._hash_calculations = 0
self._columns = {}
self._hash_info = {}
self._hash_cache_needs_write = False
self._read_hashes()
def add_column(self, name, data):
self._columns[name] = data
if self.write:
return # the columns don't include the final data
# the hashes will be done in .freeze()
hash_info = self._hash_info.get(name)
if hash_info:
hash_info_previous = hash_info.copy()
hash, hash_info = hash_array(data, hash_info, return_info=True)
if hash_info_previous != hash_info:
self._hash_cache_needs_write = True
self._ids[name] = hash
self._hash_info[name] = hash_info # always update the information
@property
def _local_hash_path(self):
# TODO: support s3 and gcs
# TODO: fallback directory when a user cannot write
if Path(self.path).exists():
directory = Path(self.path + '.d')
directory.mkdir(exist_ok=True)
else:
o = urlparse(self.path)
directory = Path(vaex.utils.get_private_dir('dataset', o.scheme, o.netloc, o.path[1:]))
return directory / 'hashes.yaml'
def hashed(self):
if set(self._ids) == set(self):
return self
cls = type(self)
# use pickle protocol to clone
new = cls.__new__(cls)
new.__setstate__(self.__getstate__())
hashes = {}
disk_cached_hashes = {}
for name, column in new.items():
hash_info = self._hash_info.get(name)
if hash_info is None:
                logging.warning(f'Calculating hash for column {name} of length {len(column)} (one-time operation, will be cached on disk)')
hash_info = hash_array_data(column)
hash, hash_info = hash_array(column, hash_info, return_info=True)
new._hash_calculations += 1
hashes[name] = hash
disk_cached_hashes[name] = hash_info
new._ids = frozendict(hashes)
new._hash_info = frozendict(disk_cached_hashes)
path_hashes = new._local_hash_path
        # TODO: without this check, if multiple processes are writing (e.g. tests/execution_test.py::test_task_sum with ray)
        # this leads to a race condition: we write the file, and while it is truncated, _read_hashes() fails (because the file exists)
# if new._hash_info != new._ids:
new._write_hash_info()
return new
def _write_hash_info(self):
if self._hash_info: # TODO: file lock
path_hashes = self._local_hash_path
with path_hashes.open('w') as f:
vaex.utils.yaml_dump(f, {'columns': dict(self._hash_info)})
class DatasetCached(DatasetDecorator):
snake_name = "cached"
shared_cache = {}
def __init__(self, original, names, cache=None, to_numpy=False):
super(DatasetCached, self).__init__(original)
self.original = original
self.names = names
self._shared = cache is None or cache is self.shared_cache
self.cache = cache if cache is not None else self.shared_cache
self.to_numpy = to_numpy
self._create_columns()
self._row_count = self.original.row_count
@property
def _fingerprint(self):
return self.original.fingerprint
def _create_columns(self):
columns = {}
schema = self.original.schema()
for name, column in self.original.items():
columns[name] = ColumnProxy(self, name, schema[name])
self._columns = frozendict(columns)
self._ids = frozendict(self.original._ids)
def _encode(self, encoding, skip=set()):
raise NotImplementedError("cannot serialize cache")
@classmethod
def _decode(cls, encoding, spec):
raise NotImplementedError("cannot serialize cache")
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
chunk_size = chunk_size or chunk_size_default
columns_all = set(columns)
columns_cachable = columns_all & set(self.names)
# avoids asking the cache twice, by using .get() and then testing for None
columns_cached = {name: self.cache.get(self._cache_key(name)) for name in columns_cachable}
columns_cached = {name: array for name, array in columns_cached.items() if array is not None}
columns_to_cache = columns_cachable - set(columns_cached)
column_required = columns_all - set(columns_cached)
cache_chunks = {name: [] for name in columns_to_cache}
def cached_iterator():
chunks_list = [chunks for name, chunks in columns_cached.items()]
# chunks_list is of form [[ar1x, ar2x, a3x], [ar1y, ar2y, a3y]]
# and now we want to yield
# * i1, i2 {'x': ar1x, 'y': ar1y}
# * i1, i2 {'x': ar2x, 'y': ar2y}
# * i1, i2 {'x': ar3x, 'y': ar3y}
names = [name for name, chunks in columns_cached.items()]
i1 = 0
i2 = 0
for chunks in zip(*chunks_list):
i2 += len(chunks[0])
for chunk in chunks:
assert len(chunk) == len(chunks[0])
yield i1, i2, dict(zip(names, chunks))
i1 = i2
if columns_cached:
cached_iter = chunk_rechunk(cached_iterator(), chunk_size)
else:
cached_iter = empty_chunk_iterator(0, self.row_count, chunk_size)
if column_required:
original_iter = self.original.chunk_iterator(column_required, chunk_size, reverse=reverse)
else:
original_iter = empty_chunk_iterator(0, self.row_count, chunk_size)
original_iter = list(original_iter)
cached_iter = list(cached_iter)
for (o1, o2, ochunks), (c1, c2, cchunks) in zip(original_iter, cached_iter):
assert o1 == c1
assert o2 == c2
yield o1, o2, {**ochunks, **cchunks}
for name in columns_to_cache:
if self.to_numpy:
ochunks = {k: vaex.array_types.to_numpy(v) for k, v in ochunks.items()}
cache_chunks[name].append(ochunks[name])
        # we write it to the cache in one go
for name in columns_to_cache:
self.cache[self._cache_key(name)] = cache_chunks[name]
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return type(self)(self.original.slice(start, end), self.names, cache=self.cache)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.names, cache=self.cache)
def _cache_key(self, name):
return f"{self.fingerprint}-{name}"
|
76142
|
import os
import argparse
import pyautogui
import time
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", help="absolute path to store screenshot.", default=r"./images")
parser.add_argument("-t", "--type", help="h (in hour) or m (in minutes) or s (in seconds)", default='h')
parser.add_argument("-f", "--frequency", help="frequency for taking screenshot per h/m/s.", default=1, type=int)
args = parser.parse_args()
sec = 0.
if args.type == 'h':
    sec = 60 * 60 / args.frequency
elif args.type == 'm':
    sec = 60 / args.frequency
elif args.type == 's':
    sec = 1. / args.frequency
if sec < 1.:
    sec = 1.
if not os.path.isdir(args.path):
os.mkdir(args.path)
try:
while True:
t = time.localtime()
current_time = time.strftime("%H_%M_%S", t)
file = current_time + ".jpg"
image = pyautogui.screenshot(os.path.join(args.path,file))
print(f"{file} saved successfully.\n")
time.sleep(sec)
except KeyboardInterrupt:
print("End of script by user interrupt")
|
76165
|
import os, sys, inspect
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from core.bounds import WSR_mu_plus
from core.concentration import get_tlambda, get_lhat_from_table, get_lhat_from_table_binarysearch
import numpy as np
from scipy.optimize import brentq
from tqdm import tqdm
import pdb
def get_coco_example_loss_and_size_tables(lambdas_example_table):
lam_len = len(lambdas_example_table)
lam_low = min(lambdas_example_table)
lam_high = max(lambdas_example_table)
fname_loss = f'../coco/.cache/{lam_low}_{lam_high}_{lam_len}_example_loss_table.npy'
fname_sizes = f'../coco/.cache/{lam_low}_{lam_high}_{lam_len}_example_size_table.npy'
loss_table = np.load(fname_loss)
sizes_table = np.load(fname_sizes)
return loss_table, sizes_table
if __name__ == "__main__":
n_cal = int(4000)
dataset_replicates = 5
n_lambda = 10000
n_reps = int(1e2)
epsilon = 1e-10
maxiters = int(1e5)
num_grid_bennett = 1000
mus = [0.05, 0.1, 0.2]
deltas = [0.001, 0.01, 0.05, 0.1]
lambdas_table = np.linspace(0,1,n_lambda)
delta = .1
gamma = .1
# get losses
example_loss_table, _ = get_coco_example_loss_and_size_tables(lambdas_table)
example_loss_table = np.concatenate( (example_loss_table,)*dataset_replicates, axis=0 )
example_loss_table = example_loss_table + np.random.uniform(size=example_loss_table.shape)/100
risks = np.zeros((n_reps,))
# get the bound
bound_str = 'WSR'
bound_fn = WSR_mu_plus
tlambda = get_tlambda(1500,deltas,n_cal,None,None,None,epsilon,maxiters,bound_str,bound_fn)
for j in tqdm(range(n_reps)):
np.random.shuffle(example_loss_table)
calib_loss_table, val_loss_table = (example_loss_table[:n_cal], example_loss_table[n_cal:])
# get lhat (should be close to gamma)
lhat = get_lhat_from_table_binarysearch(calib_loss_table, lambdas_table, gamma, delta, tlambda, bound_str)
val_losses = val_loss_table[:,np.argmax(lambdas_table == lhat)]
risks[j] = val_losses.mean()
print(f"dataset replicates: {dataset_replicates}")
print((risks > gamma).mean())
print(risks)
|
76216
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("WRITE")
process.source = cms.Source("EmptySource", numberEventsInLuminosityBlock = cms.untracked.uint32(4))
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20))
process.out = cms.OutputModule("PoolOutputModule", fileName = cms.untracked.string("multi_lumi.root"))
process.o = cms.EndPath(process.out)
|
76222
|
import copy
from django.http import HttpResponseBadRequest
from django.contrib.sitemaps import views as django_sitemaps_views
def custom_sitemap_index(request, sitemaps, section=None, template_name='sitemap.xml', content_type='application/xml'):
platform = request.GET.get('platform', None)
platform_in = request.GET.get('platform__in', None)
domain = request.GET.get('domain', None)
if not domain:
return HttpResponseBadRequest(content='must pass in domain as a querystring argument')
platform_filters = None
platform_selection = platform or platform_in
if platform_selection:
platform_filters = platform_selection.split(',')
sitemaps_copy = copy.deepcopy(sitemaps)
for section, site in sitemaps_copy.items():
if callable(site):
sitemaps_copy[section] = site(platform_filters=platform_filters, domain=domain)
return django_sitemaps_views.sitemap(request, sitemaps_copy, None, template_name, content_type)
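# URLconf wiring sketch (the import path and the `sitemaps` mapping are
# illustrative; the view only requires a ?domain=... querystring and that any
# callable sitemap entries accept platform_filters and domain keyword arguments):
#
#   from django.urls import path
#
#   urlpatterns = [
#       path('sitemap.xml', custom_sitemap_index, {'sitemaps': sitemaps}),
#   ]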
|
76226
|
from jmetal.algorithm.singleobjective.simulated_annealing import SimulatedAnnealing
from jmetal.operator import BitFlipMutation
from jmetal.problem import OneMax
from jmetal.util.solution import print_function_values_to_file, print_variables_to_file
from jmetal.util.termination_criterion import StoppingByEvaluations
if __name__ == '__main__':
problem = OneMax(number_of_bits=1024)
max_evaluations = 20000
algorithm = SimulatedAnnealing(
problem=problem,
mutation=BitFlipMutation(probability=1.0 / problem.number_of_bits),
termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)
)
algorithm.run()
result = algorithm.get_result()
# Save results to file
print_function_values_to_file(result, 'FUN.'+ algorithm.get_name() + "." + problem.get_name())
print_variables_to_file(result, 'VAR.' + algorithm.get_name() + "." + problem.get_name())
print('Algorithm: ' + algorithm.get_name())
print('Problem: ' + problem.get_name())
print('Solution: ' + result.get_binary_string())
print('Fitness: ' + str(result.objectives[0]))
print('Computing time: ' + str(algorithm.total_computing_time))
|
76255
|
import ledshim
class Leds(object):
def __init__(self):
self.count = 24
def set_one(self, led_number, color):
ledshim.set_pixel(led_number, *color)
def set_range(self, a_range, color):
ledshim.set_multiple_pixels(a_range, color)
def set_all(self, color):
ledshim.set_all(*color)
def clear(self):
ledshim.clear()
def show(self):
ledshim.show()
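# A minimal usage sketch (assumes the ledshim package and an attached LED SHIM;
# the colours used here are illustrative):
if __name__ == '__main__':
    leds = Leds()
    leds.set_all((0, 32, 0))       # dim green on every pixel
    leds.set_one(0, (255, 0, 0))   # first pixel red
    leds.show()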
|
76367
|
import pytest
from yamlpath.merger.enums.anchorconflictresolutions import (
AnchorConflictResolutions)
class Test_merger_enum_anchorconflictresolutions():
"""Tests for the AnchorConflictResolutions enumeration."""
def test_get_names(self):
assert AnchorConflictResolutions.get_names() == [
"STOP",
"LEFT",
"RIGHT",
"RENAME",
]
def test_get_choices(self):
assert AnchorConflictResolutions.get_choices() == [
"left",
"rename",
"right",
"stop",
]
@pytest.mark.parametrize("input,output", [
("STOP", AnchorConflictResolutions.STOP),
("LEFT", AnchorConflictResolutions.LEFT),
("RIGHT", AnchorConflictResolutions.RIGHT),
("RENAME", AnchorConflictResolutions.RENAME),
])
def test_from_str(self, input, output):
assert output == AnchorConflictResolutions.from_str(input)
def test_from_str_nameerror(self):
with pytest.raises(NameError):
AnchorConflictResolutions.from_str("NO SUCH NAME")
|
76386
|
import numpy as np
import pandas as pd
from statsmodels.sandbox.stats.multicomp import multipletests
import regreg.api as rr
from ...api import (randomization,
glm_group_lasso,
multiple_queries)
from ...tests.instance import (gaussian_instance,
logistic_instance)
from ...tests.flags import SMALL_SAMPLES, SET_SEED
from ...tests.decorators import (wait_for_return_value,
set_seed_iftrue,
set_sampling_params_iftrue)
from ..query import naive_confidence_intervals, naive_pvalues
from ..M_estimator import restricted_Mest
from ..cv_view import CV_view
from ..glm import (glm_nonparametric_bootstrap,
pairs_bootstrap_glm)
if SMALL_SAMPLES:
nboot = 10
else:
nboot = -1
@set_seed_iftrue(SET_SEED)
@set_sampling_params_iftrue(SMALL_SAMPLES, burnin=10, ndraw=10)
@wait_for_return_value()
def test_cv(n=100, p=50, s=5, signal=7.5, K=5, rho=0.,
randomizer = 'gaussian',
randomizer_scale = 1.,
scale1 = 0.1,
scale2 = 0.2,
lam_frac = 1.,
glmnet = True,
loss = 'gaussian',
bootstrap = False,
condition_on_CVR = True,
marginalize_subgrad = True,
ndraw = 10000,
burnin = 2000,
nboot = nboot):
print(n,p,s, condition_on_CVR, scale1, scale2)
if randomizer == 'laplace':
randomizer = randomization.laplace((p,), scale=randomizer_scale)
elif randomizer == 'gaussian':
randomizer = randomization.isotropic_gaussian((p,),randomizer_scale)
elif randomizer == 'logistic':
randomizer = randomization.logistic((p,), scale=randomizer_scale)
if loss == "gaussian":
X, y, beta, nonzero, sigma = gaussian_instance(n=n, p=p, s=s, rho=rho, signal=signal, sigma=1)
glm_loss = rr.glm.gaussian(X, y)
elif loss == "logistic":
X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal)
glm_loss = rr.glm.logistic(X, y)
epsilon = 1./np.sqrt(n)
# view 1
cv = CV_view(glm_loss,
loss_label=loss,
lasso_randomization=randomizer,
epsilon=epsilon,
scale1=scale1,
scale2=scale2)
if glmnet:
try:
cv.solve(glmnet=glmnet)
except ImportError:
cv.solve(glmnet=False)
else:
cv.solve(glmnet=False)
# for the test make sure we also run the python code
cv_py = CV_view(glm_loss,
loss_label=loss,
lasso_randomization=randomizer,
epsilon=epsilon,
scale1=scale1,
scale2=scale2)
cv_py.solve(glmnet=False)
lam = cv.lam_CVR
print("lam", lam)
if condition_on_CVR:
cv.condition_on_opt_state()
lam = cv.one_SD_rule(direction="up")
print("new lam", lam)
# non-randomized Lasso, just looking how many vars it selects
problem = rr.simple_problem(glm_loss, rr.l1norm(p, lagrange=lam))
beta_hat = problem.solve()
active_hat = beta_hat !=0
print("non-randomized lasso ", active_hat.sum())
# view 2
W = lam_frac * np.ones(p) * lam
penalty = rr.group_lasso(np.arange(p),
weights=dict(zip(np.arange(p), W)), lagrange=1.)
M_est = glm_group_lasso(glm_loss, epsilon, penalty, randomizer)
if nboot > 0:
cv.nboot = M_est.nboot = nboot
mv = multiple_queries([cv, M_est])
mv.solve()
active_union = M_est._overall
nactive = np.sum(active_union)
print("nactive", nactive)
if nactive==0:
return None
nonzero = np.where(beta)[0]
if set(nonzero).issubset(np.nonzero(active_union)[0]):
active_set = np.nonzero(active_union)[0]
true_vec = beta[active_union]
if marginalize_subgrad == True:
M_est.decompose_subgradient(conditioning_groups=np.zeros(p, bool),
marginalizing_groups=np.ones(p, bool))
selected_features = np.zeros(p, np.bool)
selected_features[active_set] = True
unpenalized_mle = restricted_Mest(M_est.loss, selected_features)
form_covariances = glm_nonparametric_bootstrap(n, n)
target_info, target_observed = pairs_bootstrap_glm(M_est.loss, selected_features, inactive=None)
cov_info = M_est.setup_sampler()
target_cov, score_cov = form_covariances(target_info,
cross_terms=[cov_info],
nsample=M_est.nboot)
opt_sample = M_est.sampler.sample(ndraw,
burnin)
pvalues = M_est.sampler.coefficient_pvalues(unpenalized_mle,
target_cov,
score_cov,
parameter=np.zeros(selected_features.sum()),
sample=opt_sample)
intervals = M_est.sampler.confidence_intervals(unpenalized_mle, target_cov, score_cov, sample=opt_sample)
L, U = intervals.T
sel_covered = np.zeros(nactive, np.bool)
sel_length = np.zeros(nactive)
LU_naive = naive_confidence_intervals(np.diag(target_cov), target_observed)
naive_covered = np.zeros(nactive, np.bool)
naive_length = np.zeros(nactive)
naive_pvals = naive_pvalues(np.diag(target_cov), target_observed, true_vec)
active_var = np.zeros(nactive, np.bool)
for j in range(nactive):
if (L[j] <= true_vec[j]) and (U[j] >= true_vec[j]):
sel_covered[j] = 1
if (LU_naive[j, 0] <= true_vec[j]) and (LU_naive[j, 1] >= true_vec[j]):
naive_covered[j] = 1
sel_length[j] = U[j]-L[j]
naive_length[j] = LU_naive[j,1]-LU_naive[j,0]
active_var[j] = active_set[j] in nonzero
q = 0.2
        BH_decisions = multipletests(pvalues, alpha=q, method="fdr_bh")[0]
        return sel_covered, sel_length, naive_pvals, naive_covered, naive_length, active_var, BH_decisions, active_var
|
76391
|
from django import template
register = template.Library()
@register.simple_tag(name='render_field')
def render_field(field, **kwargs):
field.field.widget.attrs.update(kwargs)
return field
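# Template usage sketch (assumes this module is registered as a template tag
# library named e.g. "form_tags"; the tag merges the given keyword arguments
# into the widget attrs before rendering):
#
#   {% load form_tags %}
#   {% render_field form.email placeholder="Email address" autocomplete="off" %}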
|
76429
|
import json
data = '''
[
{
"name":"Sister",
"count":95
},
{
"name":"Sidharth",
"count":94
},
{
"name":"Ilona",
"count":93
},
{
"name":"Ruairidh",
"count":93
},
{
"name":"Virginie",
"count":92
},
{
"name":"Alanda",
"count":91
},
{
"name":"Taegen",
"count":90
},
{
"name":"Dexter",
"count":89
},
{
"name":"Ricards",
"count":87
},
{
"name":"Talorcan",
"count":79
},
{
"name":"Etienne",
"count":76
},
{
"name":"Dannii",
"count":75
},
{
"name":"Claire",
"count":74
},
{
"name":"Kerry",
"count":72
},
{
"name":"Kobe",
"count":71
},
{
"name":"Meghana",
"count":69
},
{
"name":"Flint",
"count":62
},
{
"name":"Alexia",
"count":61
},
{
"name":"Sabrina",
"count":58
},
{
"name":"Sanna",
"count":56
},
{
"name":"Nelly",
"count":53
},
{
"name":"Sukhpreet",
"count":50
},
{
"name":"Merina",
"count":50
},
{
"name":"Sammie",
"count":48
},
{
"name":"Ophelia",
"count":47
},
{
"name":"Alanas",
"count":46
},
{
"name":"Macie",
"count":46
},
{
"name":"Lukmaan",
"count":42
},
{
"name":"Paisley",
"count":38
},
{
"name":"Roos",
"count":37
},
{
"name":"Kaceylee",
"count":36
},
{
"name":"Annagayle",
"count":36
},
{
"name":"Pamela",
"count":35
},
{
"name":"Jaime",
"count":33
},
{
"name":"Leilani",
"count":30
},
{
"name":"Syeda",
"count":28
},
{
"name":"Maddison",
"count":28
},
{
"name":"Oonagh",
"count":27
},
{
"name":"Tammylee",
"count":24
},
{
"name":"Bohbi",
"count":20
},
{
"name":"Rodrigo",
"count":16
},
{
"name":"Alfee",
"count":16
},
{
"name":"Ebeny",
"count":16
},
{
"name":"Aleishia",
"count":13
},
{
"name":"Rosanna",
"count":11
},
{
"name":"Kaidey",
"count":10
},
{
"name":"Maisy",
"count":8
},
{
"name":"Bader",
"count":3
},
{
"name":"Jarred",
"count":1
},
{
"name":"Indy",
"count":1
}
]
'''
info = json.loads(data)
print('User count:',len(info))
s=0
count1=0
for item in info:
print('Name : ',item["name"])
print('Count : ',item["count"])
s=s+int(item["count"])
count1=count1+1
print("Sum : ",s)
print("Count : ",count1)
|
76434
|
from tethys_sdk.testing import TethysTestCase
import tethys_services.models as service_model
from unittest import mock
class SpatialDatasetServiceTests(TethysTestCase):
def set_up(self):
pass
def tear_down(self):
pass
def test__str__(self):
sds = service_model.SpatialDatasetService(
name='test_sds',
)
self.assertEqual('test_sds', sds.__str__())
@mock.patch('tethys_services.models.GeoServerSpatialDatasetEngine')
def test_get_engine_geo_server(self, mock_sds):
sds = service_model.SpatialDatasetService(
name='test_sds',
engine=service_model.SpatialDatasetService.GEOSERVER,
endpoint='http://localhost/geoserver/rest/',
public_endpoint='http://publichost/geoserver/rest/',
username='foo',
password='password'
)
sds.save()
ret = sds.get_engine()
# Check result
mock_sds.assert_called_with(endpoint='http://localhost/geoserver/rest/', password='password', username='foo')
self.assertEqual('http://publichost/geoserver/rest/', ret.public_endpoint)
@mock.patch('tethys_services.models.TDSCatalog')
@mock.patch('tethys_services.models.session_manager')
def test_get_engine_thredds(self, mock_session_manager, mock_TDSCatalog):
sds = service_model.SpatialDatasetService(
name='test_sds',
engine=service_model.SpatialDatasetService.THREDDS,
endpoint='http://localhost/thredds/',
public_endpoint='http://publichost/thredds/',
username='foo',
password='password'
)
sds.save()
ret = sds.get_engine()
mock_session_manager.set_session_options.assert_called_with(auth=('foo', 'password'))
mock_TDSCatalog.assert_called_with('http://localhost/thredds/catalog.xml')
# Check result
self.assertEqual(mock_TDSCatalog(), ret)
@mock.patch('tethys_services.models.TDSCatalog')
@mock.patch('tethys_services.models.session_manager')
def test_get_engine_thredds_no_trailing_slashes(self, mock_session_manager, mock_TDSCatalog):
sds = service_model.SpatialDatasetService(
name='test_sds',
engine=service_model.SpatialDatasetService.THREDDS,
endpoint='http://localhost/thredds',
public_endpoint='http://publichost/thredds',
username='foo',
password='password'
)
sds.save()
ret = sds.get_engine()
mock_session_manager.set_session_options.assert_called_with(auth=('foo', 'password'))
mock_TDSCatalog.assert_called_with('http://localhost/thredds/catalog.xml')
# Check result
self.assertEqual(mock_TDSCatalog(), ret)
@mock.patch('tethys_services.models.TDSCatalog')
@mock.patch('tethys_services.models.session_manager')
def test_get_engine_thredds_no_username_password(self, mock_session_manager, mock_TDSCatalog):
sds = service_model.SpatialDatasetService(
name='test_sds',
engine=service_model.SpatialDatasetService.THREDDS,
endpoint='http://localhost/thredds',
public_endpoint='http://publichost/thredds',
)
sds.save()
ret = sds.get_engine()
mock_session_manager.set_session_options.assert_not_called()
mock_TDSCatalog.assert_called_with('http://localhost/thredds/catalog.xml')
# Check result
self.assertEqual(mock_TDSCatalog(), ret)
|
76462
|
from django.utils.functional import cached_property
from redis.exceptions import ConnectionError, ResponseError
from experiments.redis_client import get_redis_client
COUNTER_CACHE_KEY = 'experiments:participants:%s'
COUNTER_FREQ_CACHE_KEY = 'experiments:freq:%s'
class Counters(object):
@cached_property
def _redis(self):
return get_redis_client()
def increment(self, key, participant_identifier, count=1):
if count == 0:
return
try:
cache_key = COUNTER_CACHE_KEY % key
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
new_value = self._redis.hincrby(cache_key, participant_identifier, count)
# Maintain histogram of per-user counts
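            # (a participant's total moves from the (new_value - count) bucket to the
            # new_value bucket: decrement the old bucket if it existed, then increment
            # the new one; a first-time participant only increments its new bucket)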
if new_value > count:
self._redis.hincrby(freq_cache_key, new_value - count, -1)
self._redis.hincrby(freq_cache_key, new_value, 1)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
pass
def clear(self, key, participant_identifier):
try:
# Remove the direct entry
cache_key = COUNTER_CACHE_KEY % key
pipe = self._redis.pipeline()
freq, _ = pipe.hget(cache_key, participant_identifier).hdel(cache_key, participant_identifier).execute()
# Remove from the histogram
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
self._redis.hincrby(freq_cache_key, freq or 0, -1)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
pass
def get(self, key):
try:
cache_key = COUNTER_CACHE_KEY % key
return self._redis.hlen(cache_key)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return 0
def get_frequency(self, key, participant_identifier):
try:
cache_key = COUNTER_CACHE_KEY % key
freq = self._redis.hget(cache_key, participant_identifier)
return int(freq) if freq else 0
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return 0
def get_frequencies(self, key):
try:
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
# In some cases when there are concurrent updates going on, there can
# briefly be a negative result for some frequency count. We discard these
# as they shouldn't really affect the result, and they are about to become
# zero anyway.
return dict((int(k), int(v)) for (k, v) in self._redis.hgetall(freq_cache_key).items() if int(v) > 0)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return dict()
def reset(self, key):
try:
cache_key = COUNTER_CACHE_KEY % key
self._redis.delete(cache_key)
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
self._redis.delete(freq_cache_key)
return True
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return False
def reset_pattern(self, pattern_key):
        # Similar to reset(), but takes a key pattern instead of a single key
try:
cache_key = COUNTER_CACHE_KEY % pattern_key
for key in self._redis.keys(cache_key):
self._redis.delete(key)
freq_cache_key = COUNTER_FREQ_CACHE_KEY % pattern_key
for key in self._redis.keys(freq_cache_key):
self._redis.delete(key)
return True
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return False
def reset_prefix(self, key_prefix):
# Delete all data in redis for a given key prefix
from experiments.utils import grouper
try:
for key_pattern in [COUNTER_CACHE_KEY, COUNTER_FREQ_CACHE_KEY]:
match = "%s:*" % (key_pattern % key_prefix)
key_iter = self._redis.scan_iter(match)
# Delete keys in groups of 1000 to prevent problems with long
# running experiments having many participants
for keys in grouper(key_iter, 1000):
# The last group will be padded with None to reach the specified batch
# size, so these are filtered out here
self._redis.delete(*filter(None, keys))
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
pass
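# A minimal usage sketch (key names and participant ids are illustrative; on
# Redis connection problems every method above degrades to a no-op / zero result):
#
#   counters = Counters()
#   counters.increment('my-experiment:goal', 'user-1')
#   counters.get('my-experiment:goal')                       # number of distinct participants
#   counters.get_frequency('my-experiment:goal', 'user-1')   # this participant's count
#   counters.get_frequencies('my-experiment:goal')           # {per-user count: n_participants}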
|