the-stack_106_30038 | """Prepare MS COCO datasets"""
import os
import shutil
import zipfile
import argparse
from encoding.utils import download, mkdir
_TARGET_DIR = os.path.expanduser('~/.encoding/data')
def parse_args():
parser = argparse.ArgumentParser(
description='Initialize MS COCO dataset.',
epilog='Example: python mscoco.py --download-dir ~/mscoco',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--download-dir', type=str, default=None, help='dataset directory on disk')
args = parser.parse_args()
return args
def download_coco(path, overwrite=False):
_DOWNLOAD_URLS = [
('http://images.cocodataset.org/zips/train2017.zip',
'10ad623668ab00c62c096f0ed636d6aff41faca5'),
('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
'8551ee4bb5860311e79dace7e79cb91e432e78b3'),
('http://images.cocodataset.org/zips/val2017.zip',
'4950dc9d00dbe1c933ee0170f5797584351d2a41')
]
mkdir(path)
for url, checksum in _DOWNLOAD_URLS:
filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
# extract
with zipfile.ZipFile(filename) as zf:
zf.extractall(path=path)
def install_coco_api():
repo_url = "https://github.com/cocodataset/cocoapi"
os.system("git clone " + repo_url)
os.system("cd cocoapi/PythonAPI/ && python setup.py install")
shutil.rmtree('cocoapi')
try:
import pycocotools
except Exception:
print("Installing COCO API failed, please install it manually %s"%(repo_url))
if __name__ == '__main__':
args = parse_args()
mkdir(os.path.expanduser('~/.encoding/data'))
if args.download_dir is not None:
        # remove any existing link or placeholder before creating the symlink
        # (os.remove fails on a real directory, hence the rmtree branch)
        if os.path.islink(_TARGET_DIR) or os.path.isfile(_TARGET_DIR):
            os.remove(_TARGET_DIR)
        elif os.path.isdir(_TARGET_DIR):
            shutil.rmtree(_TARGET_DIR)
# make symlink
os.symlink(args.download_dir, _TARGET_DIR)
else:
download_coco(_TARGET_DIR, overwrite=False)
install_coco_api()
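# After a successful run, the data root (~/.encoding/data by default) is
# expected to contain the extracted archives, roughly:
#   train2017/  val2017/  annotations/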
|
the-stack_106_30039 | import pytest
from pandas import Timedelta, Timestamp
import ibis
import ibis.common.exceptions as com
import ibis.expr.operations as ops
from ibis.backends.pandas.execution import execute
from ibis.expr.scope import Scope
from ibis.expr.timecontext import (
TimeContextRelation,
adjust_context,
compare_timecontext,
)
from ibis.expr.typing import TimeContext
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import tm # noqa: E402
class CustomAsOfJoin(ops.AsOfJoin):
pass
def test_execute_with_timecontext(time_table):
expr = time_table
# define a time context for time-series data
context = (Timestamp('20170101'), Timestamp('20170103'))
# without time context, execute produces every row
df_all = expr.execute()
assert len(df_all['time']) == 8
# with context set, execute produces only rows within context
df_within_context = expr.execute(timecontext=context)
assert len(df_within_context['time']) == 1
def test_bad_timecontext(time_table, t):
expr = time_table
# define context with illegal string
with pytest.raises(com.IbisError, match=r".*type pd.Timestamp.*"):
context = ('bad', 'context')
expr.execute(timecontext=context)
    # define context with unsupported type int
with pytest.raises(com.IbisError, match=r".*type pd.Timestamp.*"):
context = (20091010, 20100101)
expr.execute(timecontext=context)
# define context with too few values
with pytest.raises(com.IbisError, match=r".*should specify.*"):
context = Timestamp('20101010')
expr.execute(timecontext=context)
# define context with begin value later than end
with pytest.raises(com.IbisError, match=r".*before or equal.*"):
context = (Timestamp('20101010'), Timestamp('20090101'))
expr.execute(timecontext=context)
# execute context with a table without time column
with pytest.raises(com.IbisError, match=r".*must have a time column.*"):
context = (Timestamp('20090101'), Timestamp('20100101'))
t.execute(timecontext=context)
def test_compare_timecontext():
c1 = (Timestamp('20170101'), Timestamp('20170103'))
c2 = (Timestamp('20170101'), Timestamp('20170111'))
c3 = (Timestamp('20160101'), Timestamp('20160103'))
c4 = (Timestamp('20161215'), Timestamp('20170102'))
assert compare_timecontext(c1, c2) == TimeContextRelation.SUBSET
assert compare_timecontext(c2, c1) == TimeContextRelation.SUPERSET
assert compare_timecontext(c1, c4) == TimeContextRelation.OVERLAP
assert compare_timecontext(c1, c3) == TimeContextRelation.NONOVERLAP
def test_context_adjustment_asof_join(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2
):
expr = time_keyed_left.asof_join(
time_keyed_right, 'time', by='key', tolerance=4 * ibis.interval(days=1)
)[time_keyed_left, time_keyed_right.other_value]
context = (Timestamp('20170105'), Timestamp('20170111'))
result = expr.execute(timecontext=context)
# compare with asof_join of manually trimmed tables
trimmed_df1 = time_keyed_df1[time_keyed_df1['time'] >= context[0]][
time_keyed_df1['time'] < context[1]
]
trimmed_df2 = time_keyed_df2[
time_keyed_df2['time'] >= context[0] - Timedelta(days=4)
][time_keyed_df2['time'] < context[1]]
expected = dd.merge_asof(
trimmed_df1,
trimmed_df2,
on='time',
by='key',
tolerance=Timedelta('4D'),
).compute()
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="TODO - windowing - #2553")
@pytest.mark.parametrize(
['interval_ibis', 'interval_pd'],
[
(ibis.interval(days=1), '1d'),
(3 * ibis.interval(days=1), '3d'),
(5 * ibis.interval(days=1), '5d'),
],
)
def test_context_adjustment_window(
time_table, time_df3, interval_ibis, interval_pd
):
# trim data manually
expected = (
time_df3.set_index('time')
.value.rolling(interval_pd, closed='both')
.mean()
)
expected = expected[expected.index >= Timestamp('20170105')].reset_index(
drop=True
)
context = Timestamp('20170105'), Timestamp('20170111')
window = ibis.trailing_window(interval_ibis, order_by=time_table.time)
expr = time_table['value'].mean().over(window)
# result should adjust time context accordingly
result = expr.execute(timecontext=context)
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="TODO - windowing - #2553")
def test_setting_timecontext_in_scope(time_table, time_df3):
expected_win_1 = (
time_df3.compute()
.set_index('time')
.value.rolling('3d', closed='both')
.mean()
)
expected_win_1 = expected_win_1[
expected_win_1.index >= Timestamp('20170105')
].reset_index(drop=True)
context = Timestamp('20170105'), Timestamp('20170111')
window1 = ibis.trailing_window(
3 * ibis.interval(days=1), order_by=time_table.time
)
"""
    In the following expression, the Selection node will be executed first and
    get the table in context ('20170105', '20170111'). Then in window execution
    the table will be executed again with a larger context adjusted by the
    window preceding days ('20170102', '20170111'). To get the correct result,
    the cached table result with the smaller context must be discarded and
    updated to the larger time range.
"""
expr = time_table.mutate(value=time_table['value'].mean().over(window1))
result = expr.execute(timecontext=context)
tm.assert_series_equal(result["value"], expected_win_1)
@pytest.mark.xfail(reason="TODO - windowing - #2553")
def test_context_adjustment_multi_window(time_table, time_df3):
expected_win_1 = (
time_df3.compute()
.set_index('time')
.rename(columns={'value': 'v1'})['v1']
.rolling('3d', closed='both')
.mean()
)
expected_win_1 = expected_win_1[
expected_win_1.index >= Timestamp('20170105')
].reset_index(drop=True)
expected_win_2 = (
time_df3.compute()
.set_index('time')
.rename(columns={'value': 'v2'})['v2']
.rolling('2d', closed='both')
.mean()
)
expected_win_2 = expected_win_2[
expected_win_2.index >= Timestamp('20170105')
].reset_index(drop=True)
context = Timestamp('20170105'), Timestamp('20170111')
window1 = ibis.trailing_window(
3 * ibis.interval(days=1), order_by=time_table.time
)
window2 = ibis.trailing_window(
2 * ibis.interval(days=1), order_by=time_table.time
)
expr = time_table.mutate(
v1=time_table['value'].mean().over(window1),
v2=time_table['value'].mean().over(window2),
)
result = expr.execute(timecontext=context)
tm.assert_series_equal(result["v1"], expected_win_1)
tm.assert_series_equal(result["v2"], expected_win_2)
@pytest.mark.xfail(reason="TODO - windowing - #2553")
def test_context_adjustment_window_groupby_id(time_table, time_df3):
"""This test case is meant to test trim_window_result method
in dask/execution/window.py to see if it could trim Series
correctly with groupby params
"""
expected = (
time_df3.compute()
.set_index('time')
.groupby('id')
.value.rolling('3d', closed='both')
.mean()
)
# This is a MultiIndexed Series
expected = expected.reset_index()
expected = expected[expected.time >= Timestamp('20170105')].reset_index(
drop=True
)['value']
context = Timestamp('20170105'), Timestamp('20170111')
# expected.index.name = None
window = ibis.trailing_window(
3 * ibis.interval(days=1), group_by='id', order_by=time_table.time
)
expr = time_table['value'].mean().over(window)
# result should adjust time context accordingly
result = expr.execute(timecontext=context)
tm.assert_series_equal(result, expected)
def test_adjust_context_scope(time_keyed_left, time_keyed_right):
"""Test that `adjust_context` has access to `scope` by default."""
@adjust_context.register(CustomAsOfJoin)
def adjust_context_custom_asof_join(
op: ops.AsOfJoin,
scope: Scope,
timecontext: TimeContext,
) -> TimeContext:
"""Confirms that `scope` is passed in."""
assert scope is not None
return timecontext
expr = CustomAsOfJoin(
left=time_keyed_left,
right=time_keyed_right,
predicates='time',
by='key',
tolerance=ibis.interval(days=4),
).to_expr()
expr = expr[time_keyed_left, time_keyed_right.other_value]
context = (Timestamp('20170105'), Timestamp('20170111'))
expr.execute(timecontext=context)
def test_adjust_context_complete_shift(
time_keyed_left,
time_keyed_right,
time_keyed_df1,
time_keyed_df2,
):
"""Test `adjust_context` function that completely shifts the context.
This results in an adjusted context that is NOT a subset of the
original context. This is unlike an `adjust_context` function
that only expands the context.
See #3104
"""
# Create a contrived `adjust_context` function for
# CustomAsOfJoin to mock this.
@adjust_context.register(CustomAsOfJoin)
def adjust_context_custom_asof_join(
op: ops.AsOfJoin,
scope: Scope,
timecontext: TimeContext,
) -> TimeContext:
"""Shifts both the begin and end in the same direction."""
begin, end = timecontext
timedelta = execute(op.tolerance)
return (begin - timedelta, end - timedelta)
expr = CustomAsOfJoin(
left=time_keyed_left,
right=time_keyed_right,
predicates='time',
by='key',
tolerance=ibis.interval(days=4),
).to_expr()
expr = expr[time_keyed_left, time_keyed_right.other_value]
context = (Timestamp('20170101'), Timestamp('20170111'))
result = expr.execute(timecontext=context)
# Compare with asof_join of manually trimmed tables
# Left table: No shift for context
# Right table: Shift both begin and end of context by 4 days
trimmed_df1 = time_keyed_df1[time_keyed_df1['time'] >= context[0]][
time_keyed_df1['time'] < context[1]
]
trimmed_df2 = time_keyed_df2[
time_keyed_df2['time'] >= context[0] - Timedelta(days=4)
][time_keyed_df2['time'] < context[1] - Timedelta(days=4)]
expected = dd.merge_asof(
trimmed_df1,
trimmed_df2,
on='time',
by='key',
tolerance=Timedelta('4D'),
).compute()
tm.assert_frame_equal(result, expected)
|
the-stack_106_30041 | import os
from .vendored import colorconv, cm
import numpy as np
import vispy.color
_matplotlib_list_file = os.path.join(
os.path.dirname(__file__), 'matplotlib_cmaps.txt'
)
with open(_matplotlib_list_file) as fin:
matplotlib_colormaps = [line.rstrip() for line in fin]
primary_color_names = ['red', 'green', 'blue', 'cyan', 'magenta', 'yellow']
primary_colors = np.array(
[(1, 0, 0), (0, 1, 0), (0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 0)],
dtype=float,
)
simple_colormaps = {
name: vispy.color.Colormap([[0.0, 0.0, 0.0], color])
for name, color in zip(primary_color_names, primary_colors)
}
def _all_rgb():
"""Return all 256**3 valid rgb tuples."""
base = np.arange(256, dtype=np.uint8)
r, g, b = np.meshgrid(base, base, base, indexing='ij')
return np.stack((r, g, b), axis=-1).reshape((-1, 3))
# obtained with colorconv.rgb2luv(_all_rgb().reshape((-1, 256, 3)))
LUVMIN = np.array([0.0, -83.07790815, -134.09790293])
LUVMAX = np.array([100.0, 175.01447356, 107.39905336])
LUVRNG = LUVMAX - LUVMIN
# obtained with colorconv.rgb2lab(_all_rgb().reshape((-1, 256, 3)))
LABMIN = np.array([0.0, -86.18302974, -107.85730021])
LABMAX = np.array([100.0, 98.23305386, 94.47812228])
LABRNG = LABMAX - LABMIN
def _validate_rgb(colors, *, tolerance=0.0):
"""Return the subset of colors that is in [0, 1] for all channels.
Parameters
----------
colors : array of float, shape (N, 3)
Input colors in RGB space.
Other Parameters
----------------
tolerance : float, optional
Values outside of the range by less than ``tolerance`` are allowed and
clipped to be within the range.
Returns
-------
filtered_colors : array of float, shape (M, 3), M <= N
The subset of colors that are in valid RGB space.
Examples
--------
>>> colors = np.array([[ 0. , 1., 1. ],
... [ 1.1, 0., -0.03],
... [ 1.2, 1., 0.5 ]])
>>> _validate_rgb(colors)
array([[0., 1., 1.]])
>>> _validate_rgb(colors, tolerance=0.15)
array([[0., 1., 1.],
[1., 0., 0.]])
"""
lo = 0 - tolerance
hi = 1 + tolerance
valid = np.all((colors > lo) & (colors < hi), axis=1)
filtered_colors = np.clip(colors[valid], 0, 1)
return filtered_colors
def _low_discrepancy_image(image, seed=0.5):
"""Generate a 1d low discrepancy sequence of coordinates.
Parameters
----------
    image : array of int
A set of labels or label image.
seed : float
The seed from which to start the quasirandom sequence.
Returns
-------
image_out : array of float
The set of ``labels`` remapped to [0, 1] quasirandomly.
"""
phi = 1.6180339887498948482
image_out = (seed + image / phi) % 1
# Clipping slightly above 0 and below 1 is necessary to ensure that the
# labels do not get mapped to 0 which is represented by the background
# and is transparent
return np.clip(image_out, 0.00001, 1.0 - 0.00001)
def _low_discrepancy(dim, n, seed=0.5):
"""Generate a 1d, 2d, or 3d low discrepancy sequence of coordinates.
Parameters
----------
dim : one of {1, 2, 3}
The dimensionality of the sequence.
n : int
How many points to generate.
seed : float or array of float, shape (dim,)
The seed from which to start the quasirandom sequence.
Returns
-------
pts : array of float, shape (n, dim)
The sampled points.
References
----------
    .. [1] http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/
"""
phi1 = 1.6180339887498948482
phi2 = 1.32471795724474602596
phi3 = 1.22074408460575947536
seed = np.broadcast_to(seed, (1, dim))
phi = np.array([phi1, phi2, phi3])
g = 1 / phi
n = np.reshape(np.arange(n), (n, 1))
pts = (seed + (n * g[:dim])) % 1
return pts
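# Illustrative values (not used by the module): for dim=1 and seed=0.5 the
# sequence is (0.5 + n / phi1) % 1, i.e. approximately
# 0.5, 0.118, 0.736, 0.354, ... for n = 0, 1, 2, 3.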
def _color_random(n, *, colorspace='lab', tolerance=0.0, seed=0.5):
"""Generate n random RGB colors uniformly from LAB or LUV space.
Parameters
----------
n : int
Number of colors to generate.
colorspace : str, one of {'lab', 'luv', 'rgb'}
The colorspace from which to get random colors.
tolerance : float
How much margin to allow for out-of-range RGB values (these are
clipped to be in-range).
seed : float or array of float, shape (3,)
Value from which to start the quasirandom sequence.
Returns
-------
rgb : array of float, shape (n, 3)
RGB colors chosen uniformly at random from given colorspace.
"""
factor = 6 # about 1/5 of random LUV tuples are inside the space
expand_factor = 2
rgb = np.zeros((0, 3))
while len(rgb) < n:
random = _low_discrepancy(3, n * factor, seed=seed)
if colorspace == 'luv':
raw_rgb = colorconv.luv2rgb(random * LUVRNG + LUVMIN)
elif colorspace == 'rgb':
raw_rgb = random
else: # 'lab' by default
raw_rgb = colorconv.lab2rgb(random * LABRNG + LABMIN)
rgb = _validate_rgb(raw_rgb, tolerance=tolerance)
factor *= expand_factor
return rgb[:n]
def label_colormap(num_colors=256, seed=0.5):
"""Produce a colormap suitable for use with a given label set.
Parameters
----------
num_colors : int, optional
Number of unique colors to use. Default used if not given.
seed : float or array of float, length 3
The seed for the random color generator.
Returns
-------
cmap : vispy.color.Colormap
        A colormap for use with labels that are remapped to [0, 1].
Notes
-----
0 always maps to fully transparent.
"""
# Starting the control points slightly above 0 and below 1 is necessary
# to ensure that the background pixel 0 is transparent
midpoints = np.linspace(0.00001, 1 - 0.00001, num_colors - 1)
control_points = np.concatenate(([0], midpoints, [1.0]))
# make sure to add an alpha channel to the colors
colors = np.concatenate(
(_color_random(num_colors, seed=seed), np.full((num_colors, 1), 1)),
axis=1,
)
colors[0, :] = 0 # ensure alpha is 0 for label 0
cmap = vispy.color.Colormap(
colors=colors, controls=control_points, interpolation='zero'
)
return cmap
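# Minimal sketch of how the pieces above fit together (illustrative only, not
# part of the module's public API): build a label colormap and map a small
# label array to RGBA colors.
def _example_label_colors():
    cmap = label_colormap(num_colors=16, seed=0.5)
    labels = np.array([0, 1, 2, 3])
    return cmap.map(_low_discrepancy_image(labels))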
def vispy_or_mpl_colormap(name):
"""Try to get a colormap from vispy, or convert an mpl one to vispy format.
Parameters
----------
name : str
The name of the colormap.
Returns
-------
cmap : vispy.color.Colormap
The found colormap.
Raises
------
KeyError
If no colormap with that name is found within vispy or matplotlib.
"""
vispy_cmaps = vispy.color.get_colormaps()
if name in vispy_cmaps:
cmap = vispy.color.get_colormap(name)
else:
try:
mpl_cmap = getattr(cm, name)
except AttributeError:
raise KeyError(
f'Colormap "{name}" not found in either vispy '
'or matplotlib.'
)
mpl_colors = mpl_cmap(np.linspace(0, 1, 256))
cmap = vispy.color.Colormap(mpl_colors)
return cmap
# A dictionary mapping names to VisPy colormap objects
ALL_COLORMAPS = {k: vispy_or_mpl_colormap(k) for k in matplotlib_colormaps}
ALL_COLORMAPS.update(simple_colormaps)
# ... sorted alphabetically by name
AVAILABLE_COLORMAPS = {k: v for k, v in sorted(ALL_COLORMAPS.items())}
|
the-stack_106_30042 | """
This is Roeland’s CMD color module, which abstracts away either the ANSI color
codes on VT-style terminals, or the win32 console API. The latter is also called
directly for printing text so you can print any Unicode character up to U+FFFF
on the console.
"""
import functools as _functools
import sys as _sys
import warnings as _warnings
# this script requires Python 3
# switch colors on or off
C_COLOR_OFF = "off"
C_COLOR_ON = "on"
C_COLOR_AUTO = "auto"
C_COLOR_OPTION_LIST = (C_COLOR_OFF, C_COLOR_ON, C_COLOR_AUTO)
_useColorFlag = C_COLOR_AUTO
_ctable = (
'black' ,
'blue' ,
'green' ,
'cyan' ,
'red' ,
'magenta',
'yellow' ,
'white' )
C_BRIGHT_FLAG = 8
C_RESET = None
def colorname(i):
""" Returns the name of a color
i shall be a number from 0 to 7 """
return _ctable[i]
def _color_to_str(n):
""" integer color number to string """
if n < 16:
return ("bright " if (n & C_BRIGHT_FLAG) else "") + _ctable[n % 8]
return str(n)
@_functools.total_ordering
class Color:
"""
Represent a change in color. This can be:
- a foreground and/or background color;
- setting the bright flag;
- resetting the style.
Color is immutable, all operators return new objects. The color IDs
follow Windows convention: 1 is blue and 4 is red.
The add operator is supported, where a + b is equivalent to
'apply a, then b'
Currently, combining a color with C_BRIGHT_FLAG will result in
a bright color.
"""
def make(fg, bg, flag=0):
""" create color with given foreground and background colors (given as integers) """
c = Color()
c.fg = fg
c.bg = bg
c.flag = flag
return c
def fg(color, intensity=None):
""" create color with given foreground color (given as integer) """
return Color.make(Color._val(color, intensity), None)
def bg(color, intensity=None):
""" create color with given background color (given as integer) """
return Color.make(None, Color._val(color, intensity))
def flag(s):
""" return the C_BRIGHT_FLAG flag, if set. """
c = Color()
c.flag = s
return c
# forms: Color() -> reset
# forms: Color(color) -> copy another color object
def __init__(self, color=None):
# foreground color, if set
self.fg = None
# background color, if set
self.bg = None
# flags: now used to create a color object which just switches on
# the intensity bit (C_BRIGHT)
self.flag = 0
if color is None:
return
else:
# copy fields
self.fg = color.fg
self.bg = color.bg
self.flag = color.flag
def with_fg(self, color, intensity=None):
""" return a color with the same background, but a different foreground """
return Color.make(Color._val(color, intensity), self.bg)
def with_bg(self, color, intensity=None):
""" return a color with the same foreground, but a different background """
return Color.make(self.fg, Color._val(color, intensity), self.flag)
def bright(self):
"""
Returns a version with a bright foreground color
As a special case, C_RESET.bright() returns C_BRIGHT. """
c = Color(self)
c.flag = C_BRIGHT_FLAG
c._apply_flags()
return c
def dark(self):
""" returns a version with a dark foreground color """
c = Color(self)
c.flag = 0
if c.fg and c.fg < 16: c.fg = c.fg % 8
return c
def bright_bg(self):
""" returns a version with a bright background color """
return self.with_bg(self.bg or 0, True)
def dark_bg(self):
""" returns a version with a dark background color """
return self.with_bg(self.bg or 0, False)
def _val(color, intensity):
""" Get a valid color value and optionally set the 'intensity'. """
a = int(color)
if a < 16:
if intensity:
a = a | C_BRIGHT_FLAG
elif intensity is not None:
a = a % 8
return a
def _apply_flags(self):
""" Normalize this Color instance
a flag plus a color below 16 is converted to a high intensity color. """
if self.fg is not None and self.fg < 16 and self.flag:
self.fg |= self.flag
self.flag = 0
# a + b: apply b after a
def __add__(a, b):
c = Color(a)
if b is None: return c
try:
if b.fg is not None: c.fg = int(b.fg)
if b.bg is not None: c.bg = int(b.bg)
c.flag |= b.flag
c._apply_flags()
return c
except AttributeError:
raise ValueError("Can't add Color to "+b.__class__.__name__)
# a + b: apply b after a
def __radd__(b, a):
c = Color(b)
if a is None: return c
try:
if b.fg is None: c.fg = int(a.fg)
if b.bg is None: c.bg = int(a.bg)
c.flag |= a.flag
c._apply_flags()
return c
except AttributeError:
raise ValueError("Can't add "+ a.__class__.__name__ + " to Color")
# bool(self)
def __bool__(self):
return (self.fg is not None or self.bg is not None or self.flag != 0)
# repr(self)
def __repr__(self):
# bright text: (implies no explicit foreground color)
if self.flag == C_BRIGHT_FLAG: return 'Color(None, {}, bright)'.format(self.bg)
# regular color, or None
return 'Color({}, {})'.format(self.fg, self.bg)
# str(self)
def __str__(self):
# reset:
if not self:
return "Reset"
s = ""
if self.fg:
s += _color_to_str(self.fg)
elif self.flag:
s += "bright"
if self.bg:
if len(s) > 0: s += ", "
s += _color_to_str(self.bg) + " background"
return s
# hash(self)
def __hash__(self):
        # treat unset (None) components as 0 so that hashing e.g. C_RESET works
        return (self.fg or 0) + (self.bg or 0) * 16 + self.flag * 256
# a == b
def __eq__(a, b):
        return (a.fg == b.fg and
                a.bg == b.bg and
                a.flag == b.flag)
# a < b
def __lt__(a, b):
return a.__hash__() < b.__hash__()
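# Combining colors with ``+`` applies the left operand first, then the right:
#   C_RED + C_BG_WHITE  -> red text on a white background
#   C_RED + C_BRIGHT    -> bright red text (the flag folds into the color)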
# color numbers
C_RESET = Color()
C_BLACK = Color.fg(0)
C_BLUE = Color.fg(1)
C_GREEN = Color.fg(2)
C_CYAN = Color.fg(3)
C_RED = Color.fg(4)
C_MAGENTA = Color.fg(5)
C_YELLOW = Color.fg(6)
C_WHITE = Color.fg(7)
""" special value which turns on "bright" (also called "bold") """
C_BRIGHT = Color.flag(C_BRIGHT_FLAG)
C_BG_BLACK = Color.bg(0)
C_BG_BLUE = Color.bg(1)
C_BG_GREEN = Color.bg(2)
C_BG_CYAN = Color.bg(3)
C_BG_RED = Color.bg(4)
C_BG_MAGENTA = Color.bg(5)
C_BG_YELLOW = Color.bg(6)
C_BG_WHITE = Color.bg(7)
def enableColorPrinting(flag):
"""Enables or disables color output.
flag may be:
C_COLOR_OFF: Unconditionally switch off color output
C_COLOR_ON: Always try to output color if possible on this platform
C_COLOR_AUTO (default): only output color if the file handle appears to be a console.
"""
global _useColorFlag
if flag not in [C_COLOR_OFF, C_COLOR_ON, C_COLOR_AUTO]:
        raise ValueError("flag not in " + ", ".join(C_COLOR_OPTION_LIST))
_useColorFlag = flag
def _set_color_raw_ansi(color, f):
""" Sets current color using ANSI codes
Used on Windows 10 in ANSI mode, or as a fallback if neither WIN32 or curses are available. """
if not color:
_print_el(f, '\033[0m')
return
ansi = []
if color.flag:
# only bright for now.
ansi.append('1')
if color.fg is not None:
if color.fg < 16:
intensity = (color.fg >= C_BRIGHT_FLAG)
ansiC = ((color.fg & 1) << 2) + (color.fg & 2) + ((color.fg & 4) >> 2)
ansi.append('3' + str(ansiC))
if intensity: ansi.append('1')
else:
ansi.append('38;5;' + str(color.fg))
if color.bg is not None:
if color.bg < 16:
intensity = (color.bg >= C_BRIGHT_FLAG)
ansiC = ((color.bg & 1) << 2) + (color.bg & 2) + ((color.bg & 4) >> 2)
ansi.append(('10' if intensity else '4') + str(ansiC))
else:
ansi.append('48;5;' + str(color.bg))
_print_el(f, '\033[' + ';'.join(ansi) + 'm')
def _reduce_16(n):
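    # Map a 256-color palette index to one of the 16 basic console colors:
    # 0-15 pass through, 232-255 (the grayscale ramp) collapse to black, dark
    # gray, light gray or bright white, and the 6x6x6 color cube (16-231) is
    # reduced channel by channel, setting the bright bit for high values.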
if n < 16:
return n
if n > 231:
if n < 235:
return 0
if n < 242:
return 8
if n < 251:
return 7
return 15
n -= 16
red = n // 36
n -= 36*red
green = n // 6
blue = n - 6*green
col = 0
if (blue >= max(green, red) - 1): col = col | 1
if (green >= max(blue, red) - 1): col = col | 2
if (red >= max(blue, green) - 1): col = col | 4
val = max(blue, green, red)
if col == 7:
if val == 0:
return 0
if val < 3:
return 8
if (val > 4): col = col | 8
return col
if _sys.platform == 'win32':
# use Windows-specific color stuff
import ctypes as _ctypes
import ctypes.wintypes as _wintypes
from os import environ as _environ
# Win32 API constants
_ENABLE_PROCESSED_OUTPUT = 0x0001
_ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winprog/winprog/windows_api_reference.asp
# for information on Windows APIs.
_STD_OUTPUT_HANDLE= -11
_STD_ERROR_HANDLE = -12
# Win32 types
_WORD = _wintypes.WORD
_DWORD = _wintypes.DWORD
_SMALL_RECT = _wintypes.SMALL_RECT
_COORD = _wintypes._COORD
class _CONSOLE_SCREEN_BUFFER_INFO(_ctypes.Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", _COORD),
("dwCursorPosition", _COORD),
("wAttributes", _WORD),
("srWindow", _SMALL_RECT),
("dwMaximumWindowSize", _COORD)]
# cache function handles
_getConsoleMode = _ctypes.windll.kernel32.GetConsoleMode
_setConsoleMode = _ctypes.windll.kernel32.SetConsoleMode
_writeConsole = _ctypes.windll.kernel32.WriteConsoleW
_getConsoleScreenBufferInfo = _ctypes.windll.kernel32.GetConsoleScreenBufferInfo
_setConsoleTextAttribute = _ctypes.windll.kernel32.SetConsoleTextAttribute
def _get_color(h):
csbi = _CONSOLE_SCREEN_BUFFER_INFO()
success = _getConsoleScreenBufferInfo(h, _ctypes.byref(csbi))
if not success: return None
return Color.make(csbi.wAttributes & 15, (csbi.wAttributes >> 4) & 15)
# get our data for any handle which is a console
class _Con:
def __init__(self, std_h):
self.h = _ctypes.windll.kernel32.GetStdHandle(std_h)
self.default = _get_color(self.h)
self.use_ansi = False
self.istty = (self.default is not None)
if self.istty and _environ.get('CMDCOLOR_ANSI', '1') == '1':
# enable ANSI sequences
mode = _DWORD(0)
ok = _getConsoleMode(self.h, _ctypes.byref(mode))
if not ok:
_warnings.warn("cmdcolor initialization: call to GetConsoleMode failed", stacklevel=4)
self.istty = False
if ok:
ok = _setConsoleMode(self.h, mode.value | _ENABLE_PROCESSED_OUTPUT | _ENABLE_VIRTUAL_TERMINAL_PROCESSING)
if ok:
self.use_ansi = True
else:
self.default = C_WHITE + C_BG_BLACK
self.color = Color(self.default) # copy
_std_h = ((_sys.stdout, _STD_OUTPUT_HANDLE),
(_sys.stderr, _STD_ERROR_HANDLE))
_con = { f[0] : _Con(f[1]) for f in _std_h }
def _istty(file):
c = _con.get(file)
return c is not None and c.istty
def _colors(file):
c = _con.get(file)
return 1 if c is None else (256 if c.use_ansi else 16)
def _can_use(file):
# if the Console API is not available, our custom printing doesn't work at all
return _istty(file)
# using WriteConsoleW also solves this stupid UnicodeEncodeError on printing fancy characters:
def _print_el(file, s):
n = _ctypes.c_int(0)
utf16 = s.encode('utf-16-le') # we need to count 2 'code' units for characters beyond U+FFFF
_writeConsole(_con[file].h, s, len(utf16) // 2, _ctypes.byref(n), None)
def _set_color(color, f):
con = _con[f]
if con.use_ansi:
_set_color_raw_ansi(color, f)
return
if not color:
color = con.default
else:
color = con.color + color
con.color = color
fg, bg = _reduce_16(color.fg), _reduce_16(color.bg)
attr = fg + bg * 16
        _setConsoleTextAttribute(con.h, attr)
_need_flush = any(c.istty for _, c in _con.items())
# Unix and Windows/msys
else:
def _istty(file):
return file.isatty()
def _print_el(file, s):
print(file=file, end=s)
try:
import curses as _cu
_cu.setupterm()
_cols = _cu.tigetnum("colors")
_afstr = _cu.tigetstr("setaf")
_abstr = _cu.tigetstr("setab")
_colbold = _cu.tigetstr("bold").decode('ascii')
_colreset = _cu.tigetstr("sgr0").decode('ascii')
def _can_use(file):
return _cols and _cols >= 8
def _colors(file):
return min(256, _cols)
def _set_color(color, f):
if not color:
_print_el(f, _colreset)
return
if color.flag:
# map "bright" to the bold attribute
_print_el(f, _colbold)
if color.fg is not None:
fg = color.fg
if _cols <= 16:
fg = _reduce_16(fg)
if fg < 16:
ansiC = ((fg & 1) << 2) + (fg & 2) + ((fg & 4) >> 2)
if fg >= C_BRIGHT_FLAG:
_print_el(f, _colbold)
else:
ansiC = fg
_print_el(f, _cu.tparm(_afstr, ansiC).decode('ascii'))
if color.bg is not None:
bg = color.bg
if _cols <= 16:
bg = _reduce_16(bg)
if bg < 16:
ansiC = ((bg & 1) << 2) + (bg & 2) + ((bg & 4) >> 2);
if _cols >= 16 and (bg & C_BRIGHT_FLAG):
ansiC += C_BRIGHT_FLAG
else:
ansiC = bg
_print_el(f, _cu.tparm(_abstr, ansiC).decode('ascii'))
except ImportError:
# no curses available. Assume the usual ANSI codes will work
def _can_use(file):
return True
def _colors(_):
return 16
#use ANSI escape codes
_set_color = _set_color_raw_ansi
_need_flush = False
def canPrintColor(file):
""" Return True if printc is able to attempt to print colored text. """
return _can_use(file)
def numColors(file):
""" Number of colors we can print on this file.
this may return 1, 8, 16 or 256 """
if not _can_use(file):
return 1
return _colors(file)
def willPrintColor(file):
""" Return True if printc will attempt to print colored text to this file.
This depends on the setting supplied with enableColorPrinting() """
global _useColorFlag
if not _can_use(file):
return False
if _useColorFlag == C_COLOR_OFF: return False
if _useColorFlag == C_COLOR_ON: return True
if file is None: return False
# COLOR_AUTO
return _istty(file)
def printc(*args, **kwargs):
""" Analog to the print() function, but accepts Color objects to change colors
Any Color objects will cause the output color to change for subsequent text.
Other objects will be printed as usual.
end is always printed without color, this avoids common problems if the trailing
return is printed with color attributes.
If color is off, the call is equivalent to
print(*[s for s in args if type(s) is not Color], **kwargs)
"""
file = kwargs.get('file', _sys.stdout)
use = willPrintColor(file)
if not use:
# strip all color objects and print
ss = [s for s in args if type(s) is not Color]
print(*ss, **kwargs)
return
sep0 = str(kwargs.get('sep', ' '))
end = str(kwargs.get('end', '\n'))
try:
if _need_flush: file.flush()
sep = None
for s in args:
if type(s) is Color:
_set_color(s, file)
else:
# handle separators. Colors do not trigger
# separators
if sep is not None:
_print_el(file, sep)
sep = None
_print_el(file, str(s))
sep = sep0
finally:
_set_color(C_RESET, file)
_print_el(file, end)
if __name__ == "__main__":
if "--force" in _sys.argv:
enableColorPrinting(C_COLOR_ON)
print("This is the ", end="")
s = "«cmdcolor»"
cl = [4, 12, 12, 14, 14, 10, 10, 11, 9, 1]
for ch, c in zip(s, cl): printc(Color.fg(c), ch, end="")
print(" module. Import it into your favorite script to print\ncolors.")
if not canPrintColor(_sys.stdout):
print("Current stdout cannot print colors")
elif not willPrintColor(_sys.stdout):
print("Current stdout will not print colors")
if "--help" in _sys.argv or "--info" in _sys.argv or "-h" in _sys.argv:
print()
printc("You can display a color chart by using the", C_BRIGHT, "--chart", C_RESET, "option.")
printc("In 256 color mode use", C_BRIGHT, "--chart256", C_RESET, "or", C_BRIGHT, "--chart256bg", C_RESET, end=".\n")
printc("Use", C_BRIGHT, "--force", C_RESET, "to always try to print color.")
printc()
printc("Status:")
printc(" stdout:", str(numColors(_sys.stdout)) + " colors" if willPrintColor(_sys.stdout) else "no colors")
printc(" stderr:", str(numColors(_sys.stderr)) + " colors" if willPrintColor(_sys.stderr) else "no colors")
elif "--chart" in _sys.argv:
print()
printc("standard text, ", C_BRIGHT, "bold text", C_RESET, ".", sep="")
print()
print(" {:<26} {:<26}".format("foreground colors", "background colors"))
for i in range(8):
printc("{:2}:" .format(i), Color.fg(i) , "{:<7}".format(colorname(i)), C_RESET,
" {:2}:".format(i+C_BRIGHT_FLAG), Color.fg(i, True) , "{:<7}".format(colorname(i)), C_RESET,
" {:2}:".format(i) , Color.bg(i) , "{:<7}".format(colorname(i)), C_RESET,
" {:2}:".format(i+C_BRIGHT_FLAG), C_BLACK.with_bg(i, True), "{:<7}".format(colorname(i)), C_RESET)
elif "--chart256" in _sys.argv or "--chart256bg" in _sys.argv:
C = (lambda x : Color.make(0, x)) if "--chart256bg" in _sys.argv else Color.fg
print()
for i in range(16):
printc(C(i), "{:03}".format(i), end=' ')
printc()
printc()
for a in range(6):
for b in range(6):
for c in range(6):
i = 16 + c + 6*(b + 6*a)
printc(C(i), "{:03}".format(i), end=' ')
printc()
printc()
printc()
for i in range(232, 244):
printc(C(i), "{:03}".format(i), end=' ')
printc()
for i in range(244, 256):
printc(C(i), "{:03}".format(i), end=' ')
printc()
|
the-stack_106_30045 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms, utils
from torch.utils.data import Dataset, DataLoader
import time
from matplotlib import pyplot as plt
import matplotlib.patches as patches
import glob
import cv2
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
print(f'Using {device}')
#input shape: nx1050x1680x3 or 1
#output shape: nx2 (x,y)
class ArmCameraDataset(Dataset):
def __init__(self, arm_dir, overhead_dir):
pass
def __len__(self):
pass
def __getitem__(self, idx):
pass
def calculate_loss(network,loss_fn,data_generator):
#calculate the average loss over the data
network.eval()
total_loss = 0
n_samples = 0
with torch.no_grad():
for x_batch, y_batch in data_generator:
x,y = x_batch.to(device), y_batch.to(device)
output = network(x)
loss = loss_fn(output,y)
total_loss += (loss)*len(y_batch)
n_samples += len(y_batch)
network.train()
return total_loss/n_samples
def log_statistics(network, loss_fn, trainloader, validationloader):
# convenience function to calculate all the stats
train_loss = calculate_loss(network, loss_fn, trainloader)
val_loss = calculate_loss(network, loss_fn, validationloader)
# train_err = 1-calculate_accuracy(network,trainloader)
# val_err = 1-calculate_accuracy(network,validationloader)
print(f"training loss: {train_loss}")
# print(f"training accuracy: {1-train_err}")
print(f"validation loss: {val_loss}")
return train_loss, val_loss
class MnistNetwork(nn.Module):
'''
The core network used in most of these experiments
'''
def __init__(self):
super().__init__()
#2d convolution layer using (3x3) filter size, with 32 channels, and a ReLU activation
self.conv1 = nn.Conv2d(in_channels=3,out_channels=64,kernel_size=(11,6)) # 1 input channel because grayscale
#2d MaxPool layer with a (2x2) downsampling factor
self.maxpool1 = nn.MaxPool2d(kernel_size=(2,2))
#2d convolution layer using (3x3) filter size, with 32 channels, and a ReLU activation
self.conv2 = nn.Conv2d(in_channels=64,out_channels=64,kernel_size=(6,4))
self.conv2_bn = nn.BatchNorm2d(64)
#2d MaxPool layer with a (2x2) downsampling factor
self.maxpool2 = nn.MaxPool2d(kernel_size=(3,3))
#dense layer with 128-dimensional output and ReLU activation
self.dense1 = nn.Linear(30*19*64,48*64) #based on output shapes below--is there some less hard-coded way to calculate this?
# self.dense1_bn = nn.BatchNorm1d(128)
#10d output and softmax activation, which maps to a distribution over 10 MNIST classes
# self.dense2 = nn.Linear(128,2)
def forward(self, x):
x = x.view(-1,3,200,125) # reshaping
x = self.maxpool1(F.leaky_relu(self.conv1(x))) # output shape is (1,32,1040/90/190,1040/90/120)--pool-->(1,32,260/45/95,260/45/60)
x = self.maxpool2(F.leaky_relu(self.conv2_bn(self.conv2(x)))) # output shape is (1,32,252/40/90,252/40/57)--pool-->(1,32,30,19)
x = F.leaky_relu(self.dense1(x.view(-1,30*19*64))) # flatten input to dense1, then relu output, output shape is (1,128)
# x = F.softmax(self.dense2(x), dim=1) # softmaxing over [1,10] so dim=1 b/c we want the 10 outputs to sum to 1<<---REMOVE! softmax is in loss function
# x = self.dense2(x)
return x
def predict(self, x):
return F.softmax(self.forward(x),dim=1)
def plot_prediction(network,i,test=True):
with torch.no_grad():
overhead_path = 'overhead_pics/*'
overhead_files = glob.glob(overhead_path)
overhead_files.sort()
overhead_img = cv2.imread(overhead_files[i], cv2.IMREAD_GRAYSCALE)
        if test:
            test_img, loc = test_data[0]
        else:
            test_img, loc = train_data[0]
        # the network outputs logits over a 48x64 grid of 10-pixel cells;
        # convert the argmax cell back to approximate (x, y) pixel coordinates
        logits = network(test_img[i].to(device)).cpu().numpy()[0]
        cell = int(np.argmax(logits))
        pred = ((cell % 64) * 10, (cell // 64) * 10)
        true_cell = int(loc[i])
        print(((true_cell % 64) * 10, (true_cell // 64) * 10))
        print(pred)
fig,ax = plt.subplots(1)
ax.imshow(overhead_img)
circle = patches.Circle(pred, radius=5, edgecolor='r', facecolor='r')
ax.add_patch(circle)
plt.show()
def get_loc_field(loc):
field = np.zeros((48,64))
field[int(loc[1]//10),int(loc[0]//10)] = 1
return field
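# Example of the target encoding above: a location of (x=123, y=45) falls in
# column 12, row 4 of the 64x48 grid (10-pixel bins over a 640x480 image), so
# the flattened class index used as the CrossEntropyLoss target is
# 4 * 64 + 12 = 268.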
if __name__ == '__main__':
cnn = MnistNetwork()
cnn.to(device)
batchsize = 32
rawimages = np.load('armpicscolor.npy')
images = torch.split(torch.from_numpy(rawimages).float(), batchsize)
rawlocations = np.load('locs.npy')
rawlocfields = np.array([np.argmax(get_loc_field(loc).flatten()) for loc in rawlocations])
# rawlocations[:,0] /= 640 #scale x to 0,1
# rawlocations[:,1] /= 480 #scale y to 0,1
locations = torch.split((torch.from_numpy(rawlocfields)).long(), batchsize)
    p = transforms.Compose([transforms.Resize((64, 64))])  # transforms.Scale is deprecated in favor of Resize
test_data = list(zip(images[:2],locations[:2]))
    train_data = list(zip(images[2:], locations[2:]))  # just split off the first two batches for test because the data's random anyway
    # train using Adam with learning rate 0.001, minibatch size 32, for 100 epochs
optimizer = optim.Adam(cnn.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
epochs = 100
train_losses = []
val_losses = []
training_time = 0
# log statistics
# train_loss, train_err, val_err = log_statistics(nn, loss_fn, trainloader, validationloader)
# train_losses.append(train_loss)
# train_errors.append(train_err)
# val_errors.append(val_err)
for epoch in range(epochs):
start_time = time.time()
for x_batch,y_batch in train_data:
# print("batch")
x,y = x_batch.to(device),y_batch.to(device)
optimizer.zero_grad()
output = cnn(x)
loss = loss_fn(output, y)
loss.backward()
optimizer.step()
epoch_time = time.time()-start_time
training_time += epoch_time
print(f"Epoch time: {epoch_time} s")
# log statistics
train_loss, val_loss = log_statistics(cnn, loss_fn, train_data, test_data)
train_losses.append(train_loss)
val_losses.append(val_loss)
plot_prediction(cnn,0)
import pdb; pdb.set_trace()
|
the-stack_106_30047 | """Provides BUILD macros for MediaPipe graphs.
mediapipe_binary_graph() converts a graph from text format to serialized binary
format.
Example:
mediapipe_binary_graph(
name = "make_graph_binarypb",
graph = "//mediapipe/framework/tool/testdata:test_graph",
output_name = "test.binarypb",
deps = [
"//video/annotation:graph_calculators_lib",
]
)
"""
load("@mediapipe//mediapipe/framework:encode_binary_proto.bzl", "encode_binary_proto", "generate_proto_descriptor_set")
load("@mediapipe//mediapipe/framework:transitive_protos.bzl", "transitive_protos")
load("@mediapipe//mediapipe/framework/deps:expand_template.bzl", "expand_template")
def mediapipe_binary_graph(name, graph = None, output_name = None, deps = [], testonly = False, **kwargs):
"""Converts a graph from text format to binary format."""
if not graph:
fail("No input graph file specified.")
if not output_name:
fail("Must specify the output_name.")
transitive_protos(
name = name + "_gather_cc_protos",
deps = deps,
testonly = testonly,
)
# Compile a simple proto parser binary using the deps.
native.cc_binary(
name = name + "_text_to_binary_graph",
visibility = ["//visibility:private"],
deps = [
"@mediapipe//mediapipe/framework/tool:text_to_binary_graph",
name + "_gather_cc_protos",
],
tags = ["manual"],
testonly = testonly,
)
# Invoke the proto parser binary.
native.genrule(
name = name,
srcs = [graph],
outs = [output_name],
cmd = (
"$(location " + name + "_text_to_binary_graph" + ") " +
("--proto_source=$(location %s) " % graph) +
("--proto_output=\"$@\" ")
),
tools = [name + "_text_to_binary_graph"],
testonly = testonly,
)
def data_as_c_string(
name,
srcs,
outs = None,
testonly = None):
"""Encodes the data from a file as a C string literal.
This produces a text file containing the quoted C string literal. It can be
included directly in a C++ source file.
Args:
name: The name of the rule.
srcs: A list containing a single item, the file to encode.
outs: A list containing a single item, the name of the output text file.
Defaults to the rule name.
testonly: pass 1 if the graph is to be used only for tests.
"""
if len(srcs) != 1:
fail("srcs must be a single-element list")
if outs == None:
outs = [name]
native.genrule(
name = name,
srcs = srcs,
outs = outs,
cmd = "$(location @mediapipe//mediapipe/framework/tool:encode_as_c_string) \"$<\" > \"$@\"",
tools = ["@mediapipe//mediapipe/framework/tool:encode_as_c_string"],
testonly = testonly,
)
def mediapipe_simple_subgraph(
name,
register_as,
graph,
deps = [],
visibility = None,
testonly = None,
**kwargs):
"""Defines a registered subgraph for inclusion in other graphs.
Args:
name: name of the subgraph target to define.
register_as: name used to invoke this graph in supergraphs. Should be in
CamelCase.
graph: the BUILD label of a text-format MediaPipe graph.
deps: any calculators or subgraphs used by this graph.
visibility: The list of packages the subgraph should be visible to.
testonly: pass 1 if the graph is to be used only for tests.
**kwargs: Remaining keyword args, forwarded to cc_library.
"""
graph_base_name = name
mediapipe_binary_graph(
name = name + "_graph",
graph = graph,
output_name = graph_base_name + ".binarypb",
deps = deps,
testonly = testonly,
)
data_as_c_string(
name = name + "_inc",
srcs = [graph_base_name + ".binarypb"],
outs = [graph_base_name + ".inc"],
)
# cc_library for a linked mediapipe graph.
expand_template(
name = name + "_linked_cc",
template = "@mediapipe//mediapipe/framework/tool:simple_subgraph_template.cc",
out = name + "_linked.cc",
substitutions = {
"{{SUBGRAPH_CLASS_NAME}}": register_as,
"{{SUBGRAPH_INC_FILE_PATH}}": native.package_name() + "/" + graph_base_name + ".inc",
},
testonly = testonly,
)
native.cc_library(
name = name,
srcs = [
name + "_linked.cc",
graph_base_name + ".inc",
],
deps = [
"@mediapipe//mediapipe/framework:calculator_framework",
"@mediapipe//mediapipe/framework:subgraph",
] + deps,
alwayslink = 1,
visibility = visibility,
testonly = testonly,
**kwargs
)
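# Illustrative BUILD usage of mediapipe_simple_subgraph (names below are
# placeholders, not targets that exist in this repository):
#
# mediapipe_simple_subgraph(
#     name = "my_subgraph",
#     register_as = "MySubgraph",
#     graph = "my_subgraph.pbtxt",
#     deps = [":my_subgraph_calculators"],
# )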
|
the-stack_106_30048 | from typing import Optional, Sequence
from faker import Factory
from tabledata import TableData
from ._common import get_providers
class TableFaker:
def __init__(self, locale: Optional[str] = None, seed: Optional[int] = None) -> None:
self.__fake = Factory.create(locale)
if seed is not None:
self.__fake.seed(seed)
def generate(
self,
providers: Sequence[str],
rows: int,
table_name: Optional[str] = None,
headers: Optional[Sequence[str]] = None,
max_workers: Optional[int] = None,
) -> TableData:
"""Generate fake data as tabular data.
Args:
providers:
List of provider names to generate a tabular data.
rows:
Number of rows in the tabular data.
headers:
List of header names.
max_workers:
Maximum number of workers to generate table data.
In default, the same as the total number of CPUs.
Returns:
tabledata.TableData: Generated fake tabular data.
"""
self.__validate_provider(providers)
if rows < 0:
raise ValueError("invalid rows")
return TableData(
table_name,
headers if headers else providers,
[
[getattr(self.__fake, faker_name)() for faker_name in providers]
for _row in range(rows)
],
max_workers=max_workers,
)
@staticmethod
def __validate_provider(providers: Sequence[str]) -> None:
if not providers:
raise ValueError("require provider(s)")
diff = set(providers) - get_providers()
if diff:
raise ValueError("unknown providers found: {}".format(diff))
|
the-stack_106_30049 | import os
import pathlib
import uuid
from hadoop_fs_wrapper.wrappers.file_system import FileSystem
from pyspark.sql import SparkSession
from datetime import datetime
from spark_utils.common.functions import is_valid_source_path
from spark_utils.dataframes.functions import copy_dataframe_to_socket
from spark_utils.dataframes.models import CopyDataOptions
from spark_utils.models.job_socket import JobSocket
def test_copy_dataframe_to_socket(spark_session: SparkSession, test_base_path: str):
test_data_path = os.path.join(test_base_path, 'copy_dataframe_to_socket')
copy_dataframe_to_socket(
spark_session=spark_session,
copy_options=CopyDataOptions(
src=JobSocket('src', f'file:///{test_data_path}/file-to-copy', 'csv'),
dest=JobSocket('dst', f'file:///{test_data_path}/out-with-copy', 'json'),
read_options={
"delimiter": ";",
"header": "true"
}
)
)
files = os.listdir(f"{test_data_path}/out-with-copy")
assert is_valid_source_path(FileSystem.from_spark_session(spark_session), path=f"{test_data_path}/out-with-copy") and len(
files) > 0
def test_copy_dataframe_to_socket_with_filename(spark_session: SparkSession, test_base_path: str):
test_data_path = os.path.join(test_base_path, 'copy_dataframe_to_socket')
copy_dataframe_to_socket(
spark_session=spark_session,
copy_options=CopyDataOptions(
src=JobSocket('src', f'file:///{test_data_path}/file-to-copy', 'csv'),
dest=JobSocket('dst', f'file:///{test_data_path}/out-with-filename', 'json'),
read_options={
"delimiter": ";",
"header": "true"
},
include_filename=True
)
)
files = [file for file in os.listdir(f"{test_data_path}/out-with-filename") if file.endswith(".json")]
file_contents = open(f"{test_data_path}/out-with-filename/{files[0]}", 'r').read()
assert 'file-to-copy' in file_contents
def test_copy_dataframe_to_socket_with_sequence(spark_session: SparkSession, test_base_path: str):
test_data_path = os.path.join(test_base_path, 'copy_dataframe_to_socket')
copy_dataframe_to_socket(
spark_session=spark_session,
copy_options=CopyDataOptions(
src=JobSocket('src', f'file:///{test_data_path}/file-to-copy', 'csv'),
dest=JobSocket('dst', f'file:///{test_data_path}/out-with-row-sequence', 'json'),
read_options={
"delimiter": ";",
"header": "true"
},
include_filename=True,
include_row_sequence=True
)
)
files = [file for file in os.listdir(f"{test_data_path}/out-with-row-sequence") if file.endswith(".json")]
file_contents = open(f"{test_data_path}/out-with-row-sequence/{files[0]}", 'r').read()
assert 'row_sequence' in file_contents
assert '0' in file_contents
def test_copy_dataframe_to_socket_with_timestamp(spark_session: SparkSession):
test_data_path = f"{pathlib.Path(__file__).parent.resolve()}/copy_dataframe_to_socket"
uuid_ = str(uuid.uuid4())
copy_stats = copy_dataframe_to_socket(
spark_session=spark_session,
copy_options=CopyDataOptions(
src=JobSocket('src', f'file:///{test_data_path}/file-to-copy-with-ts', 'csv'),
dest=JobSocket('dst', f'file:///{test_data_path}/{uuid_}', 'json'),
read_options={
"delimiter": ";",
"header": "true"
},
include_filename=True,
timestamp_column='ts',
timestamp_column_format="yyyy-MM-dd'T'HH:mm:ss"
)
)
approx_age = (datetime.now() - datetime(2021, 10, 6, 1, 0, 0)).total_seconds()
assert copy_stats['original_row_count'] == 0 \
and copy_stats['original_content_age'] == 0 \
and copy_stats['row_count'] == 4 \
and 1 - copy_stats['content_age'] / approx_age < 0.01
|
the-stack_106_30052 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from copy import copy
from typing import Any, Dict, List, Optional, Set, TYPE_CHECKING
from urllib import parse
import sqlalchemy as sqla
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.security.sqla.models import User
from markupsafe import escape, Markup
from sqlalchemy import (
Boolean,
Column,
ForeignKey,
Integer,
MetaData,
String,
Table,
Text,
UniqueConstraint,
)
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm import relationship, subqueryload
from sqlalchemy.orm.mapper import Mapper
from superset import app, ConnectorRegistry, db, is_feature_enabled, security_manager
from superset.models.helpers import AuditMixinNullable, ImportMixin
from superset.models.slice import Slice
from superset.models.tags import DashboardUpdater
from superset.models.user_attributes import UserAttribute
from superset.tasks.thumbnails import cache_dashboard_thumbnail
from superset.utils import core as utils
from superset.utils.dashboard_filter_scopes_converter import (
convert_filter_scopes,
copy_filter_scopes,
)
from superset.utils.urls import get_url_path
if TYPE_CHECKING:
# pylint: disable=unused-import
from superset.connectors.base.models import BaseDatasource
metadata = Model.metadata # pylint: disable=no-member
config = app.config
logger = logging.getLogger(__name__)
def copy_dashboard( # pylint: disable=unused-argument
mapper: Mapper, connection: Connection, target: "Dashboard"
) -> None:
dashboard_id = config["DASHBOARD_TEMPLATE_ID"]
if dashboard_id is None:
return
new_user = db.session.query(User).filter_by(id=target.id).first()
# copy template dashboard to user
template = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).first()
dashboard = Dashboard(
dashboard_title=template.dashboard_title,
position_json=template.position_json,
description=template.description,
css=template.css,
json_metadata=template.json_metadata,
slices=template.slices,
owners=[new_user],
)
db.session.add(dashboard)
db.session.commit()
# set dashboard as the welcome dashboard
extra_attributes = UserAttribute(
user_id=target.id, welcome_dashboard_id=dashboard.id
)
db.session.add(extra_attributes)
db.session.commit()
sqla.event.listen(User, "after_insert", copy_dashboard)
dashboard_slices = Table(
"dashboard_slices",
metadata,
Column("id", Integer, primary_key=True),
Column("dashboard_id", Integer, ForeignKey("dashboards.id")),
Column("slice_id", Integer, ForeignKey("slices.id")),
UniqueConstraint("dashboard_id", "slice_id"),
)
dashboard_user = Table(
"dashboard_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("dashboard_id", Integer, ForeignKey("dashboards.id")),
)
class Dashboard( # pylint: disable=too-many-instance-attributes
Model, AuditMixinNullable, ImportMixin
):
"""The dashboard object!"""
__tablename__ = "dashboards"
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(utils.MediumText())
description = Column(Text)
css = Column(Text)
json_metadata = Column(Text)
slug = Column(String(255), unique=True)
slices = relationship("Slice", secondary=dashboard_slices, backref="dashboards")
owners = relationship(security_manager.user_model, secondary=dashboard_user)
published = Column(Boolean, default=False)
export_fields = [
"dashboard_title",
"position_json",
"json_metadata",
"description",
"css",
"slug",
]
def __repr__(self) -> str:
return self.dashboard_title or str(self.id)
@property
def table_names(self) -> str:
# pylint: disable=no-member
return ", ".join(str(s.datasource.full_name) for s in self.slices)
@property
def url(self) -> str:
if self.json_metadata:
# add default_filters to the preselect_filters of dashboard
json_metadata = json.loads(self.json_metadata)
default_filters = json_metadata.get("default_filters")
# make sure default_filters is not empty and is valid
if default_filters and default_filters != "{}":
try:
if json.loads(default_filters):
filters = parse.quote(default_filters.encode("utf8"))
return "/superset/dashboard/{}/?preselect_filters={}".format(
self.slug or self.id, filters
)
except Exception: # pylint: disable=broad-except
pass
return f"/superset/dashboard/{self.slug or self.id}/"
@property
def datasources(self) -> Set[Optional["BaseDatasource"]]:
return {slc.datasource for slc in self.slices}
@property
def charts(self) -> List[Optional["BaseDatasource"]]:
return [slc.chart for slc in self.slices]
@property
def sqla_metadata(self) -> None:
# pylint: disable=no-member
meta = MetaData(bind=self.get_sqla_engine())
meta.reflect()
@renders("dashboard_title")
def dashboard_link(self) -> Markup:
title = escape(self.dashboard_title or "<empty>")
return Markup(f'<a href="{self.url}">{title}</a>')
@property
def digest(self) -> str:
"""
Returns a MD5 HEX digest that makes this dashboard unique
"""
unique_string = f"{self.position_json}.{self.css}.{self.json_metadata}"
return utils.md5_hex(unique_string)
@property
def thumbnail_url(self) -> str:
"""
Returns a thumbnail URL with a HEX digest. We want to avoid browser cache
if the dashboard has changed
"""
return f"/api/v1/dashboard/{self.id}/thumbnail/{self.digest}/"
@property
def changed_by_name(self) -> str:
if not self.changed_by:
return ""
return str(self.changed_by)
@property
def changed_by_url(self) -> str:
if not self.changed_by:
return ""
return f"/superset/profile/{self.changed_by.username}"
@property
def data(self) -> Dict[str, Any]:
positions = self.position_json
if positions:
positions = json.loads(positions)
return {
"id": self.id,
"metadata": self.params_dict,
"css": self.css,
"dashboard_title": self.dashboard_title,
"published": self.published,
"slug": self.slug,
"slices": [slc.data for slc in self.slices],
"position_json": positions,
}
@property # type: ignore
def params(self) -> str: # type: ignore
return self.json_metadata
@params.setter
def params(self, value: str) -> None:
self.json_metadata = value
@property
def position(self) -> Dict[str, Any]:
if self.position_json:
return json.loads(self.position_json)
return {}
@classmethod
def import_obj( # pylint: disable=too-many-locals,too-many-branches,too-many-statements
cls, dashboard_to_import: "Dashboard", import_time: Optional[int] = None,
) -> int:
"""Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
        be overridden or just copied over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
"""
def alter_positions(
dashboard: Dashboard, old_to_new_slc_id_dict: Dict[int, int]
) -> None:
""" Updates slice_ids in the position json.
Sample position_json data:
{
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_ROOT_ID": {
"type": "DASHBOARD_ROOT_TYPE",
"id": "DASHBOARD_ROOT_ID",
"children": ["DASHBOARD_GRID_ID"]
},
"DASHBOARD_GRID_ID": {
"type": "DASHBOARD_GRID_TYPE",
"id": "DASHBOARD_GRID_ID",
"children": ["DASHBOARD_CHART_TYPE-2"]
},
"DASHBOARD_CHART_TYPE-2": {
"type": "CHART",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {
"width": 4,
"height": 50,
"chartId": 118
}
},
}
"""
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (
isinstance(value, dict)
and value.get("meta")
and value.get("meta", {}).get("chartId")
):
old_slice_id = value["meta"]["chartId"]
if old_slice_id in old_to_new_slc_id_dict:
value["meta"]["chartId"] = old_to_new_slc_id_dict[old_slice_id]
dashboard.position_json = json.dumps(position_data)
logger.info(
"Started import of the dashboard: %s", dashboard_to_import.to_json()
)
logger.info("Dashboard has %d slices", len(dashboard_to_import.slices))
# copy slices object as Slice.import_slice will mutate the slice
# and will remove the existing dashboard - slice association
slices = copy(dashboard_to_import.slices)
# Clearing the slug to avoid conflicts
dashboard_to_import.slug = None
old_json_metadata = json.loads(dashboard_to_import.json_metadata or "{}")
old_to_new_slc_id_dict: Dict[int, int] = {}
new_timed_refresh_immune_slices = []
new_expanded_slices = {}
new_filter_scopes = {}
i_params_dict = dashboard_to_import.params_dict
remote_id_slice_map = {
slc.params_dict["remote_id"]: slc
for slc in db.session.query(Slice).all()
if "remote_id" in slc.params_dict
}
for slc in slices:
logger.info(
"Importing slice %s from the dashboard: %s",
slc.to_json(),
dashboard_to_import.dashboard_title,
)
remote_slc = remote_id_slice_map.get(slc.id)
new_slc_id = Slice.import_obj(slc, remote_slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
# update json metadata that deals with slice ids
new_slc_id_str = str(new_slc_id)
old_slc_id_str = str(slc.id)
if (
"timed_refresh_immune_slices" in i_params_dict
and old_slc_id_str in i_params_dict["timed_refresh_immune_slices"]
):
new_timed_refresh_immune_slices.append(new_slc_id_str)
if (
"expanded_slices" in i_params_dict
and old_slc_id_str in i_params_dict["expanded_slices"]
):
new_expanded_slices[new_slc_id_str] = i_params_dict["expanded_slices"][
old_slc_id_str
]
# since PR #9109, filter_immune_slices and filter_immune_slice_fields
# are converted to filter_scopes
# but dashboards created from import may still have the old dashboard filter metadata,
# so here we convert them to the new filter_scopes metadata first
filter_scopes = {}
if (
"filter_immune_slices" in i_params_dict
or "filter_immune_slice_fields" in i_params_dict
):
filter_scopes = convert_filter_scopes(old_json_metadata, slices)
if "filter_scopes" in i_params_dict:
filter_scopes = old_json_metadata.get("filter_scopes")
# then replace old slice id to new slice id:
if filter_scopes:
new_filter_scopes = copy_filter_scopes(
old_to_new_slc_id_dict=old_to_new_slc_id_dict,
old_filter_scopes=filter_scopes,
)
# override the dashboard
existing_dashboard = None
for dash in db.session.query(Dashboard).all():
if (
"remote_id" in dash.params_dict
and dash.params_dict["remote_id"] == dashboard_to_import.id
):
existing_dashboard = dash
dashboard_to_import = dashboard_to_import.copy()
dashboard_to_import.id = None
dashboard_to_import.reset_ownership()
# position_json can be empty for dashboards whose charts were added
# from the chart-edit page and never re-arranged
if dashboard_to_import.position_json:
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
dashboard_to_import.remove_params(param_to_remove="filter_immune_slices")
dashboard_to_import.remove_params(param_to_remove="filter_immune_slice_fields")
if new_filter_scopes:
dashboard_to_import.alter_params(filter_scopes=new_filter_scopes)
if new_expanded_slices:
dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
if new_timed_refresh_immune_slices:
dashboard_to_import.alter_params(
timed_refresh_immune_slices=new_timed_refresh_immune_slices
)
new_slices = (
db.session.query(Slice)
.filter(Slice.id.in_(old_to_new_slc_id_dict.values()))
.all()
)
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
db.session.flush()
return existing_dashboard.id
dashboard_to_import.slices = new_slices
db.session.add(dashboard_to_import)
db.session.flush()
return dashboard_to_import.id # type: ignore
@classmethod
def export_dashboards( # pylint: disable=too-many-locals
cls, dashboard_ids: List[int]
) -> str:
copied_dashboards = []
datasource_ids = set()
for dashboard_id in dashboard_ids:
# make sure that dashboard_id is an integer
dashboard_id = int(dashboard_id)
dashboard = (
db.session.query(Dashboard)
.options(subqueryload(Dashboard.slices))
.filter_by(id=dashboard_id)
.first()
)
# remove ids and relations (like owners, created by, slices, ...)
copied_dashboard = dashboard.copy()
for slc in dashboard.slices:
datasource_ids.add((slc.datasource_id, slc.datasource_type))
copied_slc = slc.copy()
# save original id into json
# we need it to update dashboard's json metadata on import
copied_slc.id = slc.id
# add extra params for the import
copied_slc.alter_params(
remote_id=slc.id,
datasource_name=slc.datasource.datasource_name,
schema=slc.datasource.schema,
database_name=slc.datasource.database.name,
)
# set slices without creating ORM relations
slices = copied_dashboard.__dict__.setdefault("slices", [])
slices.append(copied_slc)
copied_dashboard.alter_params(remote_id=dashboard_id)
copied_dashboards.append(copied_dashboard)
eager_datasources = []
for datasource_id, datasource_type in datasource_ids:
eager_datasource = ConnectorRegistry.get_eager_datasource(
datasource_type, datasource_id
)
copied_datasource = eager_datasource.copy()
copied_datasource.alter_params(
remote_id=eager_datasource.id,
database_name=eager_datasource.database.name,
)
datasource_class = copied_datasource.__class__
for field_name in datasource_class.export_children:
field_val = getattr(eager_datasource, field_name).copy()
# set children without creating ORM relations
copied_datasource.__dict__[field_name] = field_val
eager_datasources.append(copied_datasource)
return json.dumps(
{"dashboards": copied_dashboards, "datasources": eager_datasources},
cls=utils.DashboardEncoder,
indent=4,
)
def event_after_dashboard_changed( # pylint: disable=unused-argument
mapper: Mapper, connection: Connection, target: Dashboard
) -> None:
url = get_url_path("Superset.dashboard", dashboard_id_or_slug=target.id)
cache_dashboard_thumbnail.delay(url, target.digest, force=True)
# events for updating tags
if is_feature_enabled("TAGGING_SYSTEM"):
sqla.event.listen(Dashboard, "after_insert", DashboardUpdater.after_insert)
sqla.event.listen(Dashboard, "after_update", DashboardUpdater.after_update)
sqla.event.listen(Dashboard, "after_delete", DashboardUpdater.after_delete)
# events for refreshing dashboard thumbnails
if is_feature_enabled("THUMBNAILS_SQLA_LISTENERS"):
sqla.event.listen(Dashboard, "after_insert", event_after_dashboard_changed)
sqla.event.listen(Dashboard, "after_update", event_after_dashboard_changed)
|
the-stack_106_30053 | import gdb
from undodb.debugger_extensions import udb
def count_calls(func_name):
"""
Counts how many times func_name is hit during the replay of the currently
loaded recording and returns the hit count.
"""
# Set a breakpoint for the specified function.
bp = gdb.Breakpoint(func_name)
# Do "continue" until we have gone through the whole recording, potentially
# hitting the breakpoint several times.
end_of_time = udb.get_event_log_extent().max_bbcount
while udb.time.get().bbcount < end_of_time:
gdb.execute("continue")
return bp.hit_count
# UDB will automatically load the modules passed to UdbLauncher.add_extension
# and, if present, automatically execute any function (with no arguments) called
# "run".
def run():
# The function where to stop is passed to us from the outer script in the
# run_data dictionary.
func_name = udb.run_data["func_name"]
hit_count = count_calls(func_name)
# Pass the number of times we hit the breakpoint back to the outer script.
udb.result_data["hit-count"] = hit_count
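# Illustrative sketch (added, not part of the original extension): the "outer
# script" referred to above might look roughly like the snippet below. The
# module path, constructor usage, and attribute names of UdbLauncher are
# assumptions based on the comments in this file, not verified against the
# UDB API, so treat this purely as a shape of the interaction.
#
#     from undodb.udb_launcher import UdbLauncher
#
#     launcher = UdbLauncher()
#     launcher.recording_file = "my_program.undo"       # hypothetical recording
#     launcher.add_extension("count_calls_extension")   # this module
#     launcher.run_data["func_name"] = "malloc"         # function to count
#     launcher.run_debugger()
#     print("hit count:", launcher.result_data["hit-count"])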
|
the-stack_106_30055 | #!/usr/local/bin/python
'''
Pipeline for converting XML RES data into JSON and importing it into Elasticsearch.
'''
from bs4 import BeautifulSoup
import glob
import hashlib
import logging
import os
from os.path import join, dirname
import random
import re
import requests
import simplejson as json
import socket
import sys
import time
import urllib2
import xmltodict
import arrow
import elasticsearch
import luigi
from openfda import common, parallel, index_util, elasticsearch_requests
from openfda.annotation_table.pipeline import CombineHarmonization
from openfda.index_util import AlwaysRunTask
from openfda.res import annotate
from openfda.res import extract
RUN_DIR = dirname(dirname(os.path.abspath(__file__)))
BASE_DIR = './data'
ES_HOST = luigi.Parameter('localhost:9200', is_global=True)
CURRENT_XML_BASE_URL = ('http://www.accessdata.fda.gov/scripts/'
'enforcement/enforce_rpt-Product-Tabs.cfm?'
'action=Expand+Index&w=WEEK&lang=eng&xml')
RES_BASE_URL = ('http://www.fda.gov/Safety/Recalls/'
'EnforcementReports/default.htm')
def get_latest_date(input_url, download_url):
''' Grabs all of the hrefs pointing to the download page and then takes the
max date found in those URLs to set the upper bound date of the download
loop.
'''
url_open = urllib2.urlopen(input_url)
soup = BeautifulSoup(url_open)
re_string = download_url.split('WEEK')[0]
re_string = re_string.replace('?','\?')\
.replace('http:', '^http:')\
.replace('+','\+')
urls = soup.find_all('a', href=re.compile(re_string))
def date_from_url(url):
dt = str(url).split('w=')[1].split('&')[0]
(month, day, year) = (dt[:2], dt[2:4], dt[4:])
return arrow.get(int(year), int(month), int(day))
dates = [date_from_url(url) for url in urls]
return max(dates)
XML_START_DATE = arrow.get(2012, 6, 20)
XML_END_DATE = get_latest_date(RES_BASE_URL, CURRENT_XML_BASE_URL)
# The FDA transitioned from HTML recall events to XML during the summer of 2012.
# During that transition, there are two dates where the enforcement reports are
# available as XML, but using a slightly different URL. Also, in what appears
# to have been a manual transition between formats, the last date, 07-05-2012,
# breaks the 'every 7 days' offset logic and makes the transition
# to XML date logic a little quirky. These are collectively referred to as
# CROSSOVER_XML dates.
CROSSOVER_XML_START_DATE = XML_START_DATE
CROSSOVER_XML_END_DATE = XML_START_DATE.replace(days=+7)
CROSSOVER_XML_WEIRD_DATE = XML_END_DATE.replace(days=+8)
CROSSOVER_XML_URL = ('http://www.accessdata.fda.gov/scripts/'
'enforcement/enforce_rpt-Event-Tabs.cfm?'
'action=Expand+Index&w=WEEK&lang=eng&xml')
def random_sleep():
# Give the FDA webservers a break between requests
sleep_seconds = random.randint(1, 2)
logging.info('Sleeping %d seconds' % sleep_seconds)
time.sleep(sleep_seconds)
def download_to_file_with_retry(url, output_file):
logging.info('Downloading: ' + url)
url_open = None
# Retry up to 25 times before failing
for i in range(25):
try:
random_sleep()
url_open = urllib2.urlopen(url, timeout=5)
break
except socket.timeout:
logging.info('Timeout trying %s, retrying...', url)
continue
try:
content = url_open.read()
output_file.write(content)
return
except:
logging.fatal('Could not fetch in twenty-five tries: ' + url)
class DownloadXMLReports(luigi.Task):
batch = luigi.Parameter()
def requires(self):
return []
def output(self):
batch_str = self.batch.strftime('%Y%m%d')
return luigi.LocalTarget(join(BASE_DIR, 'res/batches', batch_str))
def run(self):
output_dir = self.output().path
common.shell_cmd('mkdir -p %s', output_dir)
date = self.batch
if date >= CROSSOVER_XML_START_DATE and date <= CROSSOVER_XML_END_DATE:
url = CROSSOVER_XML_URL
else:
url = CURRENT_XML_BASE_URL
url = url.replace('WEEK', date.strftime('%m%d%Y'))
file_name = 'enforcementreport.xml'
xml_filepath = '%(output_dir)s/%(file_name)s' % locals()
xml_file = open(xml_filepath, 'w')
download_to_file_with_retry(url, xml_file)
class XML2JSONMapper(parallel.Mapper):
''' Mapper for the XML2JSON map-reduction. There is some special logic in here
to generate a hash() from the top level key/value pairs for the id in
Elasticsearch.
Also, upc and ndc are extracted and added to reports that are of drug
type.
'''
def _hash(self, doc_json):
''' Hash function used to create unique IDs for the reports
'''
hasher = hashlib.sha256()
hasher.update(doc_json)
return hasher.hexdigest()
def map(self, key, value, output):
# These keys must exist in the JSON records for the annotation logic to work
logic_keys = [
'code-info',
'report-date',
'product-description'
]
for val in value['recall-number']:
if val['product-type'] == 'Drugs':
val['upc'] = extract.extract_upc_from_recall(val)
val['ndc'] = extract.extract_ndc_from_recall(val)
# Copy the recall-number attribute value to an actual field
# The recall-number is not a reliable id, since it repeats
val['recall-number'] = val['@id']
# There is no good ID for the report, so we need to make one
doc_id = self._hash(json.dumps(val, sort_keys=True))
val['@id'] = doc_id
val['@version'] = 1
# Only write out vals that have required keys and a meaningful date
if set(logic_keys).issubset(val) and val['report-date'] != None:
output.add(doc_id, val)
else:
logging.warn('Document is missing required fields. %s',
json.dumps(val, indent=2, sort_keys=True))
class XML2JSON(luigi.Task):
batch = luigi.Parameter()
def requires(self):
return DownloadXMLReports(self.batch)
def output(self):
return luigi.LocalTarget(join(self.input().path, 'json.db'))
def run(self):
input_dir = self.input().path
for xml_filename in glob.glob('%(input_dir)s/*.xml' % locals()):
parallel.mapreduce(
input_collection=parallel.Collection.from_glob(xml_filename,
parallel.XMLDictInput),
mapper=XML2JSONMapper(),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path,
num_shards=1,
map_workers=1)
class AnnotateJSON(luigi.Task):
batch = luigi.Parameter()
def requires(self):
return [XML2JSON(self.batch), CombineHarmonization()]
def output(self):
output_dir = self.input()[0].path.replace('json.db', 'annotated.db')
return luigi.LocalTarget(output_dir)
def run(self):
input_db = self.input()[0].path
harmonized_file = self.input()[1].path
parallel.mapreduce(
input_collection=parallel.Collection.from_sharded(input_db),
mapper=annotate.AnnotateMapper(harmonized_file),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path,
num_shards=1,
map_workers=1)
class ResetElasticSearch(AlwaysRunTask):
es_host = ES_HOST
def _run(self):
es = elasticsearch.Elasticsearch(self.es_host)
elasticsearch_requests.load_mapping(
es, 'recall.base', 'enforcementreport', './schemas/res_mapping.json')
class LoadJSON(luigi.Task):
''' Load the annotated JSON documents into Elasticsearch
'''
batch = luigi.Parameter()
es_host = ES_HOST
epoch = time.time()
def requires(self):
return [ResetElasticSearch(), AnnotateJSON(self.batch)]
def output(self):
output = self.input()[1].path.replace('annotated.db', 'load.done')
return luigi.LocalTarget(output)
def run(self):
output_file = self.output().path
input_file = self.input()[1].path
es = elasticsearch.Elasticsearch(self.es_host)
index_util.start_index_transaction(es, 'recall', self.epoch)
parallel.mapreduce(
input_collection=parallel.Collection.from_sharded(input_file),
mapper=index_util.LoadJSONMapper(self.es_host,
'recall',
'enforcementreport',
self.epoch,
docid_key='@id',
version_key='@version'),
reducer=parallel.NullReducer(),
output_prefix='/tmp/loadjson.recall',
num_shards=1,
map_workers=1)
index_util.commit_index_transaction(es, 'recall')
common.shell_cmd('touch %s', output_file)
class RunWeeklyProcess(luigi.Task):
''' Generates a date object that is passed through the pipeline tasks in order
to generate and load JSON documents for the weekly enforcement reports.
There is some special date logic due to some gaps in the every 7 day
pattern.
'''
# TODO(hansnelsen): find a way to detect and auto adjust a gap in the every 7
# days logic.
def requires(self):
start = XML_START_DATE
end = get_latest_date(RES_BASE_URL, CURRENT_XML_BASE_URL)
for batch_date in arrow.Arrow.range('week', start, end):
# Handle annoying cases like July 5th, 2012 (it is +8)
if batch_date == arrow.get(2012, 7, 4):
batch = batch_date.replace(days=+1)
elif batch_date == arrow.get(2013, 12, 25):
batch = batch_date.replace(days=+1)
elif batch_date == arrow.get(2014, 10, 8):
batch = batch_date.replace(days=-1)
elif batch_date == arrow.get(2014, 12, 3):
batch = batch_date.replace(days=-1)
else:
batch = batch_date
yield LoadJSON(batch)
if __name__ == '__main__':
fmt_string = '%(created)f %(filename)s:%(lineno)s [%(funcName)s] %(message)s'
logging.basicConfig(stream=sys.stderr,
format=fmt_string,
level=logging.INFO)
# elasticsearch is too verbose by default (logs every successful request)
logging.getLogger('elasticsearch').setLevel(logging.WARN)
luigi.run(main_task_cls=RunWeeklyProcess)
|
the-stack_106_30058 | import uuid
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils import timezone
class UUIDModel(models.Model):
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False
)
class Meta:
abstract = True
class User(AbstractUser, UUIDModel):
pass
class OwnedModel(models.Model):
created_at = models.DateTimeField(
editable=False,
default=timezone.now
)
user = models.ForeignKey(
'datasets.User',
on_delete=models.CASCADE,
related_name='+',
editable=False
)
class Meta:
abstract = True
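# Illustrative example (added, not part of the original app): a concrete model
# that combines the two abstract bases above. The "Dataset" name and its field
# are hypothetical and only show how UUIDModel and OwnedModel are meant to be
# mixed in.
#
#     class Dataset(OwnedModel, UUIDModel):
#         name = models.CharField(max_length=255)
#
#         def __str__(self):
#             return self.name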
|
the-stack_106_30060 | import traceback
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework.generics import CreateAPIView, ListAPIView, DestroyAPIView, RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_204_NO_CONTENT, HTTP_201_CREATED, HTTP_404_NOT_FOUND
import api.models as models
from api.serializers import SignUpSerializer, FavoritesSerializer
class AccessTokenParameter(openapi.Parameter):
def __init__(self,**extra):
super().__init__(
name='Authorization',
in_='header',
required=True,
type=openapi.TYPE_STRING,
description="Session JWT",
format="Bearer ACCESS-JWT",
**extra
)
class SignUp(CreateAPIView):
serializer_class = SignUpSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
data = serializer.data
headers = self.get_success_headers(serializer.data)
return Response(data, status=HTTP_201_CREATED, headers=headers)
@method_decorator(
name="get",
decorator=swagger_auto_schema(
manual_parameters=[AccessTokenParameter()],
operation_description="Get a list of registered favorite comics"
)
)
@method_decorator(
name="post",
decorator=swagger_auto_schema(
manual_parameters=[AccessTokenParameter()],
operation_description="Create a favorite comic"
)
)
class FavoritesCR(ListAPIView, CreateAPIView):
serializer_class = FavoritesSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
return models.Favorites.objects.filter(user=self.request.user)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
@method_decorator(
name="get",
decorator=swagger_auto_schema(
manual_parameters=[AccessTokenParameter()],
operation_description="Get a single comic from favorite list"
)
)
@method_decorator(
name="delete",
decorator=swagger_auto_schema(
manual_parameters=[AccessTokenParameter()],
operation_description="Delete a single comic from favorite list"
)
)
class FavoritesGD(DestroyAPIView, RetrieveAPIView):
serializer_class = FavoritesSerializer
permission_classes = [IsAuthenticated]
lookup_field = 'comicId'
def get_queryset(self):
return models.Favorites.objects.filter(user=self.request.user)
|
the-stack_106_30061 | from __future__ import print_function, division
from typing import Optional, Union, List
import numpy as np
from scipy.ndimage.morphology import binary_erosion, binary_dilation
from skimage.morphology import erosion, dilation
from skimage.measure import label as label_cc # avoid namespace conflict
from scipy.signal import convolve2d
from .data_affinity import *
from .data_transform import *
RATES_TYPE = Optional[Union[List[int], int]]
def getSegType(mid):
# reduce the label dtype
m_type = np.uint64
if mid < 2**8:
m_type = np.uint8
elif mid < 2**16:
m_type = np.uint16
elif mid < 2**32:
m_type = np.uint32
return m_type
def relabel(seg, do_type=False):
# get the unique labels
uid = np.unique(seg)
# ignore all-background samples
if len(uid) == 1 and uid[0] == 0:
return seg
uid = uid[uid > 0]
mid = int(uid.max()) + 1 # get the maximum label for the segment
# create an array from original segment id to reduced id
m_type = seg.dtype
if do_type:
m_type = getSegType(mid)
mapping = np.zeros(mid, dtype=m_type)
mapping[uid] = np.arange(1, len(uid) + 1, dtype=m_type)
return mapping[seg]
def remove_small(seg, thres=100):
sz = seg.shape
seg = seg.reshape(-1)
uid, uc = np.unique(seg, return_counts=True)
seg[np.in1d(seg, uid[uc < thres])] = 0
return seg.reshape(sz)
def im2col(A, BSZ, stepsize=1):
# Parameters
M, N = A.shape
# Get Starting block indices
start_idx = np.arange(
0, M-BSZ[0]+1, stepsize)[:, None]*N + np.arange(0, N-BSZ[1]+1, stepsize)
# Get offset indices across the height and width of the input array
offset_idx = np.arange(BSZ[0])[:, None]*N + np.arange(BSZ[1])
# Get all actual indices & index into input array for final output
return np.take(A, start_idx.ravel()[:, None] + offset_idx.ravel())
def seg_widen_border(seg, tsz_h=1):
# Kisuk Lee's thesis (A.1.4):
# "we preprocessed the ground truth seg such that any voxel centered on a 3 × 3 × 1 window containing
# more than one positive segment ID (zero is reserved for background) is marked as background."
# seg=0: background
tsz = 2*tsz_h+1
sz = seg.shape
if len(sz) == 3:
for z in range(sz[0]):
mm = seg[z].max()
patch = im2col(
np.pad(seg[z], ((tsz_h, tsz_h), (tsz_h, tsz_h)), 'reflect'), [tsz, tsz])
p0 = patch.max(axis=1)
patch[patch == 0] = mm+1
p1 = patch.min(axis=1)
seg[z] = seg[z]*((p0 == p1).reshape(sz[1:]))
else:
mm = seg.max()
patch = im2col(
np.pad(seg, ((tsz_h, tsz_h), (tsz_h, tsz_h)), 'reflect'), [tsz, tsz])
p0 = patch.max(axis=1)
patch[patch == 0] = mm + 1
p1 = patch.min(axis=1)
seg = seg * ((p0 == p1).reshape(sz))
return seg
def seg_to_small_seg(seg, thres=25, rr=2):
# rr: z/x-y resolution ratio
sz = seg.shape
mask = np.zeros(sz, np.uint8)
for z in np.where(seg.max(axis=1).max(axis=1) > 0)[0]:
tmp = label_cc(seg[z])
ui, uc = np.unique(tmp, return_counts=True)
rl = np.zeros(ui[-1]+1, np.uint8)
rl[ui[uc < thres]] = 1
rl[0] = 0
mask[z] += rl[tmp]
for y in np.where(seg.max(axis=2).max(axis=0) > 0)[0]:
tmp = label_cc(seg[:, y])
ui, uc = np.unique(tmp, return_counts=True)
rl = np.zeros(ui[-1]+1, np.uint8)
rl[ui[uc < thres//rr]] = 1
rl[0] = 0
mask[:, y] += rl[tmp]
for x in np.where(seg.max(axis=0).max(axis=0) > 0)[0]:
tmp = label_cc(seg[:, :, x])
ui, uc = np.unique(tmp, return_counts=True)
rl = np.zeros(ui[-1]+1, np.uint8)
rl[ui[uc < thres//rr]] = 1
rl[0] = 0
mask[:, :, x] += rl[tmp]
return mask
def seg_to_instance_bd(seg: np.ndarray,
tsz_h: int = 1,
do_bg: bool = True,
do_convolve: bool = True) -> np.ndarray:
"""Generate instance contour map from segmentation masks.
Args:
seg (np.ndarray): segmentation map (3D array is required).
tsz_h (int, optional): size of the dilation struct. Defaults: 1
do_bg (bool, optional): generate contour between instances and background. Defaults: True
do_convolve (bool, optional): convolve with edge filters. Defaults: True
Returns:
np.ndarray: binary instance contour map.
Note:
According to the experiment on the Lucchi mitochondria segmentation dataset, convolving
the edge filters with segmentation masks to generate the contour map is about 3x faster
than using the `im2col` function. However, calculating the contour between only non-background
instances is not supported under the convolution mode.
"""
if do_bg == False:
do_convolve = False
sz = seg.shape
bd = np.zeros(sz, np.uint8)
tsz = tsz_h*2+1
if do_convolve:
sobel = [1, 0, -1]
sobel_x = np.array(sobel).reshape(3, 1)
sobel_y = np.array(sobel).reshape(1, 3)
for z in range(sz[0]):
slide = seg[z]
edge_x = convolve2d(slide, sobel_x, 'same', boundary='symm')
edge_y = convolve2d(slide, sobel_y, 'same', boundary='symm')
edge = np.maximum(np.abs(edge_x), np.abs(edge_y))
contour = (edge != 0).astype(np.uint8)
bd[z] = dilation(contour, np.ones((tsz, tsz), dtype=np.uint8))
return bd
mm = seg.max()
for z in range(sz[0]):
patch = im2col(
np.pad(seg[z], ((tsz_h, tsz_h), (tsz_h, tsz_h)), 'reflect'), [tsz, tsz])
p0 = patch.max(axis=1)
if do_bg: # at least one non-zero seg
p1 = patch.min(axis=1)
bd[z] = ((p0 > 0)*(p0 != p1)).reshape(sz[1:])
else: # between two non-zero seg
patch[patch == 0] = mm+1
p1 = patch.min(axis=1)
bd[z] = ((p0 != 0)*(p1 != 0)*(p0 != p1)).reshape(sz[1:])
return bd
def markInvalid(seg, iter_num=2, do_2d=True):
# find invalid
# if do erosion(seg==0), then miss the border
if do_2d:
stel = np.array([[1, 1, 1], [1, 1, 1]]).astype(bool)
if len(seg.shape) == 2:
out = binary_dilation(seg > 0, structure=stel, iterations=iter_num)
seg[out == 0] = -1
else: # save memory
for z in range(seg.shape[0]):
tmp = seg[z] # by reference
out = binary_dilation(
tmp > 0, structure=stel, iterations=iter_num)
tmp[out == 0] = -1
else:
stel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).astype(bool)
out = binary_dilation(seg > 0, structure=stel, iterations=iter_num)
seg[out == 0] = -1
return seg
def seg2binary(label, topt):
if len(topt) == 1:
return label > 0
fg_mask = np.zeros_like(label).astype(bool)
_, *fg_indices = topt.split('-')
for fg in fg_indices:
fg_mask = np.logical_or(fg_mask, label == int(fg))
return fg_mask
def seg2affinity(label, topt):
assert label.ndim in [2, 3], \
'Undefined affinity for ndim=' + str(label.ndim)
if len(topt) == 1:
return seg2aff_v0(label)
aff_func_dict = {
'v1': seg2aff_v1,
'v2': seg2aff_v2,
}
# valid format: 2-z-y-x-version
options = topt.split('-')
assert len(options) == 5
_, z, y, x, version = options
return aff_func_dict[version](
label, int(z), int(y), int(x))
def erode_label(label: np.ndarray,
index: int,
erosion_rates: RATES_TYPE = None):
if erosion_rates is None:
return label
label_erosion = erosion_rates
if isinstance(label_erosion, list):
label_erosion = label_erosion[index]
return seg_widen_border(label, label_erosion)
def dilate_label(label: np.ndarray,
index: int,
dilation_rates: RATES_TYPE = None):
if dilation_rates is None:
return label
label_dilation = dilation_rates
if isinstance(label_dilation, list):
label_dilation = label_dilation[index]
tsz = 2*label_dilation + 1
assert label.ndim in [2, 3]
shape = (1, tsz, tsz) if label.ndim == 3 else (tsz, tsz)
return dilation(label, np.ones(shape, dtype=label.dtype))
def seg2polarity(label):
# segmentation to 3-channel synaptic polarity masks
tmp = [None]*3
tmp[0] = np.logical_and((label % 2) == 1, label > 0)
tmp[1] = np.logical_and((label % 2) == 0, label > 0)
tmp[2] = (label > 0)
return np.stack(tmp, 0).astype(np.float32)
def seg_to_targets(label_orig: np.ndarray,
topts: List[str],
erosion_rates: RATES_TYPE = None,
dilation_rates: RATES_TYPE = None):
# input: (D, H, W), output: (C, D, H, W)
out = [None]*len(topts)
for tid, topt in enumerate(topts):
label = label_orig.copy()
label = erode_label(label, tid, erosion_rates)
label = dilate_label(label, tid, dilation_rates)
if topt[0] == '0': # binary mask
fg_mask = seg2binary(label, topt)
out[tid] = fg_mask[np.newaxis, :].astype(np.float32)
elif topt[0] == '1': # synaptic polarity
out[tid] = seg2polarity(label)
elif topt[0] == '2': # affinity
out[tid] = seg2affinity(label, topt)
elif topt[0] == '3': # small object mask
# size_thres: 2d threshold for small size
# zratio: resolution ration between z and x/y
# mask_dsize: mask dilation size
_, size_thres, zratio, _ = [int(x) for x in topt.split('-')]
out[tid] = (seg_to_small_seg(label, size_thres, zratio) > 0)[
None, :].astype(np.float32)
elif topt[0] == '4': # instance boundary mask
_, bd_sz, do_bg = [int(x) for x in topt.split('-')]
if label.ndim == 2:
out[tid] = seg_to_instance_bd(
label[None, :], bd_sz, do_bg).astype(np.float32)
else:
out[tid] = seg_to_instance_bd(label, bd_sz, do_bg)[
None, :].astype(np.float32)
elif topt[0] == '5': # distance transform (instance)
if len(topt) == 1:
topt = topt + '-2d'
_, mode = topt.split('-')
out[tid] = edt_instance(label.copy(), mode=mode)
elif topt[0] == '6': # distance transform (semantic)
if len(topt) == 1:
topt = topt + '-2d-8-50'
assert len(topt.split('-')) == 4
_, mode, a, b = topt.split('-')
distance = edt_semantic(label.copy(), mode, float(a), float(b))
out[tid] = distance[np.newaxis, :].astype(np.float32)
elif topt[0] == '9': # generic semantic segmentation
out[tid] = label.astype(np.int64)
else:
raise NameError("Target option %s is not valid!" % topt[0])
return out
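# Illustrative usage (added example): build a binary-mask target and an
# instance-contour target from a (D, H, W) label volume. The shapes below
# assume the default erosion/dilation settings (no erosion or dilation).
#
#     label = np.random.randint(0, 5, size=(8, 64, 64))
#     targets = seg_to_targets(label, ['0', '4-1-1'])
#     # targets[0]: (1, 8, 64, 64) float32 binary foreground mask
#     # targets[1]: (1, 8, 64, 64) float32 instance boundary mask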
|
the-stack_106_30062 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v1.model.list_stream_column import ListStreamColumn
from datadog_api_client.v1.model.list_stream_query import ListStreamQuery
from datadog_api_client.v1.model.list_stream_response_format import ListStreamResponseFormat
globals()["ListStreamColumn"] = ListStreamColumn
globals()["ListStreamQuery"] = ListStreamQuery
globals()["ListStreamResponseFormat"] = ListStreamResponseFormat
class ListStreamWidgetRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
lazy_import()
return {
"columns": ([ListStreamColumn],),
"query": (ListStreamQuery,),
"response_format": (ListStreamResponseFormat,),
}
attribute_map = {
"columns": "columns",
"query": "query",
"response_format": "response_format",
}
read_only_vars = {}
def __init__(self, columns, query, response_format, *args, **kwargs):
"""ListStreamWidgetRequest - a model defined in OpenAPI
Args:
columns ([ListStreamColumn]): Widget columns.
query (ListStreamQuery):
response_format (ListStreamResponseFormat):
Keyword Args:
"""
super().__init__(kwargs)
self._check_pos_args(args)
self.columns = columns
self.query = query
self.response_format = response_format
@classmethod
def _from_openapi_data(cls, columns, query, response_format, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(ListStreamWidgetRequest, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
self.columns = columns
self.query = query
self.response_format = response_format
return self
|
the-stack_106_30063 | from collections import deque
import numpy as np
import torch
from torch import nn
class PIDController(object):
def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20):
self._K_P = K_P
self._K_I = K_I
self._K_D = K_D
self._window = deque([0 for _ in range(n)], maxlen=n)
self._max = 0.0
self._min = 0.0
def step(self, error):
self._window.append(error)
self._max = max(self._max, abs(error))
self._min = -abs(self._max)
if len(self._window) >= 2:
integral = np.mean(self._window)
derivative = (self._window[-1] - self._window[-2])
else:
integral = 0.0
derivative = 0.0
return self._K_P * error + self._K_I * integral + self._K_D * derivative |
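# Illustrative usage of the PIDController defined above (added example, not
# part of the original module): feed a shrinking scalar error into the
# controller and print the control output at each step.
#
#     pid = PIDController(K_P=1.0, K_I=0.1, K_D=0.05, n=20)
#     for error in (0.8, 0.5, 0.2, 0.05):
#         control = pid.step(error)
#         print(f"error={error:.2f} -> control={control:.3f}")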
the-stack_106_30064 | # -*- coding: utf-8 -*-
"""
Created on Mon May 26 23:42:03 2014
@author: Administrator
"""
from support import *
import hashlib
import io
import xml.dom.minidom
import random
import math
import os
import sys
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
class safe_list(list):
def get(self, index, default=None):
try:
return self[index]
except IndexError:
return default
# Calculation is based on https://github.com/jabbany/CommentCoreLibrary/issues/5#issuecomment-40087282
# and https://github.com/m13253/danmaku2ass/issues/7#issuecomment-41489422
# ASS FOV = width*4/3.0
# But Flash FOV = width/math.tan(100*math.pi/360.0)/2 will be used instead
# Result: (transX, transY, rotX, rotY, rotZ, scaleX, scaleY)
def ConvertFlashRotation(rotY, rotZ, X, Y, width, height):
def WrapAngle(deg):
return 180-((180-deg) % 360)
rotY = WrapAngle(rotY)
rotZ = WrapAngle(rotZ)
if rotY in (90, -90):
rotY -= 1
if rotY == 0 or rotZ == 0:
outX = 0
outY = -rotY # Positive value means clockwise in Flash
outZ = -rotZ
rotY *= math.pi/180.0
rotZ *= math.pi/180.0
else:
rotY *= math.pi/180.0
rotZ *= math.pi/180.0
outY = math.atan2(-math.sin(rotY)*math.cos(rotZ), math.cos(rotY))*180/math.pi
outZ = math.atan2(-math.cos(rotY)*math.sin(rotZ), math.cos(rotZ))*180/math.pi
outX = math.asin(math.sin(rotY)*math.sin(rotZ))*180/math.pi
trX = (X*math.cos(rotZ)+Y*math.sin(rotZ))/math.cos(rotY)+(1-math.cos(rotZ)/math.cos(rotY))*width/2-math.sin(rotZ)/math.cos(rotY)*height/2
trY = Y*math.cos(rotZ)-X*math.sin(rotZ)+math.sin(rotZ)*width/2+(1-math.cos(rotZ))*height/2
trZ = (trX-width/2)*math.sin(rotY)
FOV = width*math.tan(2*math.pi/9.0)/2
try:
scaleXY = FOV/(FOV+trZ)
except ZeroDivisionError:
logging.error('Rotation makes object behind the camera: trZ == %.0f' % trZ)
scaleXY = 1
trX = (trX-width/2)*scaleXY+width/2
trY = (trY-height/2)*scaleXY+height/2
if scaleXY < 0:
scaleXY = -scaleXY
outX += 180
outY += 180
logging.error('Rotation makes object behind the camera: trZ == %.0f < %.0f' % (trZ, FOV))
return (trX, trY, WrapAngle(outX), WrapAngle(outY), WrapAngle(outZ), scaleXY*100, scaleXY*100)
def WriteCommentBilibiliPositioned(f, c, width, height, styleid):
#BiliPlayerSize = (512, 384) # Bilibili player version 2010
#BiliPlayerSize = (540, 384) # Bilibili player version 2012
BiliPlayerSize = (672, 438) # Bilibili player version 2014
ZoomFactor = GetZoomFactor(BiliPlayerSize, (width, height))
def GetPosition(InputPos, isHeight):
isHeight = int(isHeight) # True -> 1
if isinstance(InputPos, int):
return ZoomFactor[0]*InputPos+ZoomFactor[isHeight+1]
elif isinstance(InputPos, float):
if InputPos > 1:
return ZoomFactor[0]*InputPos+ZoomFactor[isHeight+1]
else:
return BiliPlayerSize[isHeight]*ZoomFactor[0]*InputPos+ZoomFactor[isHeight+1]
else:
try:
InputPos = int(InputPos)
except ValueError:
InputPos = float(InputPos)
return GetPosition(InputPos, isHeight)
try:
comment_args = safe_list(json.loads(c[3]))
text = ASSEscape(str(comment_args[4]).replace('/n', '\n'))
from_x = comment_args.get(0, 0)
from_y = comment_args.get(1, 0)
to_x = comment_args.get(7, from_x)
to_y = comment_args.get(8, from_y)
from_x = GetPosition(from_x, False)
from_y = GetPosition(from_y, True)
to_x = GetPosition(to_x, False)
to_y = GetPosition(to_y, True)
alpha = safe_list(str(comment_args.get(2, '1')).split('-'))
from_alpha = float(alpha.get(0, 1))
to_alpha = float(alpha.get(1, from_alpha))
from_alpha = 255-round(from_alpha*255)
to_alpha = 255-round(to_alpha*255)
rotate_z = int(comment_args.get(5, 0))
rotate_y = int(comment_args.get(6, 0))
lifetime = float(comment_args.get(3, 4500))
duration = int(comment_args.get(9, lifetime*1000))
delay = int(comment_args.get(10, 0))
fontface = comment_args.get(12)
isborder = comment_args.get(11, 'true')
from_rotarg = ConvertFlashRotation(rotate_y, rotate_z, from_x, from_y, width, height)
to_rotarg = ConvertFlashRotation(rotate_y, rotate_z, to_x, to_y, width, height)
styles = ['\\org(%d, %d)' % (width/2, height/2)]
if from_rotarg[0:2] == to_rotarg[0:2]:
styles.append('\\pos(%.0f, %.0f)' % (from_rotarg[0:2]))
else:
styles.append('\\move(%.0f, %.0f, %.0f, %.0f, %.0f, %.0f)' % (from_rotarg[0:2]+to_rotarg[0:2]+(delay, delay+duration)))
styles.append('\\frx%.0f\\fry%.0f\\frz%.0f\\fscx%.0f\\fscy%.0f' % (from_rotarg[2:7]))
if (from_x, from_y) != (to_x, to_y):
styles.append('\\t(%d, %d, ' % (delay, delay+duration))
styles.append('\\frx%.0f\\fry%.0f\\frz%.0f\\fscx%.0f\\fscy%.0f' % (to_rotarg[2:7]))
styles.append(')')
if fontface:
styles.append('\\fn%s' % ASSEscape(fontface))
styles.append('\\fs%.0f' % (c[6]*ZoomFactor[0]))
if c[5] != 0xffffff:
styles.append('\\c&H%s&' % ConvertColor(c[5]))
if c[5] == 0x000000:
styles.append('\\3c&HFFFFFF&')
if from_alpha == to_alpha:
styles.append('\\alpha&H%02X' % from_alpha)
elif (from_alpha, to_alpha) == (255, 0):
styles.append('\\fad(%.0f,0)' % (lifetime*1000))
elif (from_alpha, to_alpha) == (0, 255):
styles.append('\\fad(0, %.0f)' % (lifetime*1000))
else:
styles.append('\\fade(%(from_alpha)d, %(to_alpha)d, %(to_alpha)d, 0, %(end_time).0f, %(end_time).0f, %(end_time).0f)' % {'from_alpha': from_alpha, 'to_alpha': to_alpha, 'end_time': lifetime*1000})
if isborder == 'false':
styles.append('\\bord0')
f.write('Dialogue: -1,%(start)s,%(end)s,%(styleid)s,,0,0,0,,{%(styles)s}%(text)s\n' % {'start': ConvertTimestamp(c[0]), 'end': ConvertTimestamp(c[0]+lifetime), 'styles': ''.join(styles), 'text': text, 'styleid': styleid})
except (IndexError, ValueError) as e:
try:
logging.warning(_('Invalid comment: %r') % c[3])
except IndexError:
logging.warning(_('Invalid comment: %r') % c)
# Result: (f, dx, dy)
# To convert: NewX = f*x+dx, NewY = f*y+dy
def GetZoomFactor(SourceSize, TargetSize):
try:
if (SourceSize, TargetSize) == GetZoomFactor.Cached_Size:
return GetZoomFactor.Cached_Result
except AttributeError:
pass
GetZoomFactor.Cached_Size = (SourceSize, TargetSize)
try:
SourceAspect = SourceSize[0]/SourceSize[1]
TargetAspect = TargetSize[0]/TargetSize[1]
if TargetAspect < SourceAspect: # narrower
ScaleFactor = TargetSize[0]/SourceSize[0]
GetZoomFactor.Cached_Result = (ScaleFactor, 0, (TargetSize[1]-TargetSize[0]/SourceAspect)/2)
elif TargetAspect > SourceAspect: # wider
ScaleFactor = TargetSize[1]/SourceSize[1]
GetZoomFactor.Cached_Result = (ScaleFactor, (TargetSize[0]-TargetSize[1]*SourceAspect)/2, 0)
else:
GetZoomFactor.Cached_Result = (TargetSize[0]/SourceSize[0], 0, 0)
return GetZoomFactor.Cached_Result
except ZeroDivisionError:
GetZoomFactor.Cached_Result = (1, 0, 0)
return GetZoomFactor.Cached_Result
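# Worked example (added for clarity, assuming true/float division): mapping the
# 2010 player canvas (512x384, 4:3) onto a 1280x720 (16:9) target hits the
# "wider" branch, so GetZoomFactor((512, 384), (1280, 720)) returns
# (720/384, (1280 - 720*512/384)/2, 0) == (1.875, 160.0, 0), i.e. every source
# coordinate maps to NewX = 1.875*x + 160 and NewY = 1.875*y.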
def WriteASSHead(f, width, height, fontface, fontsize, alpha, styleid):
f.write(
'''
[Script Info]
; Script generated by Danmaku2ASS
; https://github.com/m13253/danmaku2ass
Script Updated By: Danmaku2ASS (https://github.com/m13253/danmaku2ass)
ScriptType: v4.00+
PlayResX: %(width)d
PlayResY: %(height)d
Aspect Ratio: %(width)d:%(height)d
Collisions: Normal
WrapStyle: 2
ScaledBorderAndShadow: yes
YCbCr Matrix: TV.601
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
Style: %(styleid)s, %(fontface)s, %(fontsize).0f, &H%(alpha)02XFFFFFF, &H%(alpha)02XFFFFFF, &H%(alpha)02X000000, &H%(alpha)02X000000, 0, 0, 0, 0, 100, 100, 0.00, 0.00, 1, %(outline).0f, 0, 7, 0, 0, 0, 0
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
''' % {'width': width, 'height': height, 'fontface': fontface, 'fontsize': fontsize, 'alpha': 255-round(alpha*255), 'outline': max(fontsize/25.0, 1), 'styleid': styleid}
)
def TestFreeRows(rows, c, row, width, height, bottomReserved, lifetime):
res = 0
rowmax = height-bottomReserved
targetRow = None
if c[4] in (1, 2):
while row < rowmax and res < c[7]:
if targetRow != rows[c[4]][row]:
targetRow = rows[c[4]][row]
if targetRow and targetRow[0]+lifetime > c[0]:
break
row += 1
res += 1
else:
try:
thresholdTime = c[0]-lifetime*(1-width/(c[8]+width))
except ZeroDivisionError:
thresholdTime = c[0]-lifetime
while row < rowmax and res < c[7]:
if targetRow != rows[c[4]][row]:
targetRow = rows[c[4]][row]
try:
if targetRow and (targetRow[0] > thresholdTime or targetRow[0]+targetRow[8]*lifetime/(targetRow[8]+width) > c[0]):
break
except ZeroDivisionError:
pass
row += 1
res += 1
return res
def MarkCommentRow(rows, c, row):
row = int(row)
try:
for i in range(row, int(row+math.ceil(c[7]))):
rows[c[4]][i] = c
except IndexError:
pass
def ASSEscape(s):
def ReplaceLeadingSpace(s):
sstrip = s.strip(' ')
slen = len(s)
if slen == len(sstrip):
return s
else:
llen = slen-len(s.lstrip(' '))
rlen = slen-len(s.rstrip(' '))
return ''.join(('\u2007'*llen, sstrip, '\u2007'*rlen))
return '\\N'.join((ReplaceLeadingSpace(i) or ' ' for i in str(s).replace('\\', '\\\\').replace('{', '\\{').replace('}', '\\}').split('\n')))
def ConvertTimestamp(timestamp):
timestamp = round(timestamp*100.0)
hour, minute = divmod(timestamp, 360000)
minute, second = divmod(minute, 6000)
second, centsecond = divmod(second, 100)
return '%d:%02d:%02d.%02d' % (int(hour), int(minute), int(second), int(centsecond))
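# Example (added for clarity): ConvertTimestamp(3661.5) -> '1:01:01.50',
# i.e. 1 hour, 1 minute, 1 second and 50 centiseconds in ASS timestamp form.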
def ConvertType2(row, height, bottomReserved):
return height-bottomReserved-row
def FindAlternativeRow(rows, c, height, bottomReserved):
res = 0
for row in range(int(height-bottomReserved-math.ceil(c[7]))):
if not rows[c[4]][row]:
return row
elif rows[c[4]][row][0] < rows[c[4]][res][0]:
res = row
return res
def ConvertColor(RGB, width=1280, height=576):
if RGB == 0x000000:
return '000000'
elif RGB == 0xffffff:
return 'FFFFFF'
R = (RGB >> 16) & 0xff
G = (RGB >> 8) & 0xff
B = RGB & 0xff
if width < 1280 and height < 576:
return '%02X%02X%02X' % (B, G, R)
else: # VobSub always uses BT.601 colorspace, convert to BT.709
ClipByte = lambda x: 255 if x > 255 else 0 if x < 0 else round(x)
return '%02X%02X%02X' % (
ClipByte(R*0.00956384088080656+G*0.03217254540203729+B*0.95826361371715607),
ClipByte(R*-0.10493933142075390+G*1.17231478191855154+B*-0.06737545049779757),
ClipByte(R*0.91348912373987645+G*0.07858536372532510+B*0.00792551253479842)
)
def WriteComment(f, c, row, width, height, bottomReserved, fontsize, lifetime, styleid):
text = ASSEscape(c[3])
styles = []
if c[4] == 1:
styles.append('\\an8\\pos(%(halfwidth)d, %(row)d)' % {'halfwidth': width/2, 'row': row})
elif c[4] == 2:
styles.append('\\an2\\pos(%(halfwidth)d, %(row)d)' % {'halfwidth': width/2, 'row': ConvertType2(row, height, bottomReserved)})
elif c[4] == 3:
styles.append('\\move(%(neglen)d, %(row)d, %(width)d, %(row)d)' % {'width': width, 'row': row, 'neglen': -math.ceil(c[8])})
else:
styles.append('\\move(%(width)d, %(row)d, %(neglen)d, %(row)d)' % {'width': width, 'row': row, 'neglen': -math.ceil(c[8])})
if not (-1 < c[6]-fontsize < 1):
styles.append('\\fs%.0f' % c[6])
if c[5] != 0xffffff:
styles.append('\\c&H%s&' % ConvertColor(c[5]))
if c[5] == 0x000000:
styles.append('\\3c&HFFFFFF&')
## Replace the figure-space placeholders with ordinary spaces
text = text.replace('\u2007',' ')
f.write('Dialogue: 2,%(start)s,%(end)s,%(styleid)s,,0000,0000,0000,,{%(styles)s}%(text)s\n' % {'start': ConvertTimestamp(c[0]), 'end': ConvertTimestamp(c[0]+lifetime), 'styles': ''.join(styles), 'text': text, 'styleid': styleid})
def CalculateLength(s):
return max(map(len, s.split('\n'))) # May not be accurate
def GetVideoInfo(aid,appkey,page = 1,AppSecret=None,fav = None):
paras = {'id': GetString(aid),'page': GetString(page)}
if fav != None:
paras['fav'] = fav
url = 'http://api.bilibili.cn/view?'+GetSign(paras,appkey,AppSecret)
jsoninfo = JsonInfo(url)
video = Video(aid,jsoninfo.Getvalue('title'))
video.guankan = jsoninfo.Getvalue('play')
video.commentNumber = jsoninfo.Getvalue('review')
video.danmu = jsoninfo.Getvalue('video_review')
video.shoucang = jsoninfo.Getvalue('favorites')
video.description = jsoninfo.Getvalue('description')
video.tag = []
taglist = jsoninfo.Getvalue('tag')
if taglist != None:
for tag in taglist.split(','):
video.tag.append(tag)
video.cover = jsoninfo.Getvalue('pic')
video.author = User(jsoninfo.Getvalue('mid'),jsoninfo.Getvalue('author'))
video.page = jsoninfo.Getvalue('pages')
video.date = jsoninfo.Getvalue('created_at')
video.credit = jsoninfo.Getvalue('credit')
video.coin = jsoninfo.Getvalue('coins')
video.spid = jsoninfo.Getvalue('spid')
video.cid = jsoninfo.Getvalue('cid')
video.offsite = jsoninfo.Getvalue('offsite')
video.partname = jsoninfo.Getvalue('partname')
video.src = jsoninfo.Getvalue('src')
video.tid = jsoninfo.Getvalue('tid')
video.typename = jsoninfo.Getvalue('typename')
video.instant_server = jsoninfo.Getvalue('instant_server')
return video
def GetSign(params,appkey,AppSecret=None):
"""
Get the signature for the new API version; otherwise the API returns a -3 error.
TODO (important!):
The parameters need to be URL-encoded with uppercase hex letters, e.g. %2F.
"""
params['appkey']=appkey
data = ""
paras = params.keys()
paras.sort()
for para in paras:
if data != "":
data += "&"
data += para + "=" + params[para]
if AppSecret == None:
return data
m = hashlib.md5()
m.update(data+AppSecret)
return data+'&sign='+m.hexdigest()
def GetDanmuku(cid):
cid = int(cid)
url = "http://comment.bilibili.cn/%d.xml"%(cid)
content = zlib.decompressobj(-zlib.MAX_WBITS).decompress(getURLContent(url))
# content = GetRE(content,r'<d p=[^>]*>([^<]*)<')
return content
#def FilterBadChars(f):
# s = f.read()
# s = re.sub('[\\x00-\\x08\\x0b\\x0c\\x0e-\\x1f]', '\ufffd', s)
# return io.StringIO(s)
def ReadCommentsBilibili(f, fontsize):
dom = xml.dom.minidom.parseString(f)
comment_element = dom.getElementsByTagName('d')
for i, comment in enumerate(comment_element):
try:
p = str(comment.getAttribute('p')).split(',')
assert len(p) >= 5
assert p[1] in ('1', '4', '5', '6', '7')
if p[1] != '7':
c = str(comment.childNodes[0].wholeText).replace('/n', '\n')
size = int(p[2])*fontsize/25.0
yield (float(p[0]), int(p[4]), i, c, {'1': 0, '4': 2, '5': 1, '6': 3}[p[1]], int(p[3]), size, (c.count('\n')+1)*size, CalculateLength(c)*size)
else: # positioned comment
c = str(comment.childNodes[0].wholeText)
yield (float(p[0]), int(p[4]), i, c, 'bilipos', int(p[3]), int(p[2]), 0, 0)
except (AssertionError, AttributeError, IndexError, TypeError, ValueError):
continue
def ConvertToFile(filename_or_file, *args, **kwargs):
return open(filename_or_file, *args, **kwargs)
def ProcessComments(comments, f, width, height, bottomReserved, fontface, fontsize, alpha, lifetime, reduced, progress_callback):
styleid = 'Danmaku2ASS_%04x' % random.randint(0, 0xffff)
WriteASSHead(f, width, height, fontface, fontsize, alpha, styleid)
rows = [[None]*(height-bottomReserved+1) for i in range(4)]
for idx, i in enumerate(comments):
if progress_callback and idx % 1000 == 0:
progress_callback(idx, len(comments))
if isinstance(i[4], int):
row = 0
rowmax = height-bottomReserved-i[7]
while row <= rowmax:
freerows = TestFreeRows(rows, i, row, width, height, bottomReserved, lifetime)
if freerows >= i[7]:
MarkCommentRow(rows, i, row)
WriteComment(f, i, row, width, height, bottomReserved, fontsize, lifetime, styleid)
break
else:
row += freerows or 1
else:
if not reduced:
row = FindAlternativeRow(rows, i, height, bottomReserved)
MarkCommentRow(rows, i, row)
WriteComment(f, i, row, width, height, bottomReserved, fontsize, lifetime, styleid)
elif i[4] == 'bilipos':
WriteCommentBilibiliPositioned(f, i, width, height, styleid)
elif i[4] == 'acfunpos':
WriteCommentAcfunPositioned(f, i, width, height, styleid)
elif i[4] == 'sH5Vpos':
WriteCommentSH5VPositioned(f, i, width, height, styleid)
else:
logging.warning(_('Invalid comment: %r') % i[3])
if progress_callback:
progress_callback(len(comments), len(comments))
def ReadComments(input_files, font_size=25.0):
comments = []
comments.extend(ReadCommentsBilibili(input_files, font_size))
comments.sort()
return comments
|
the-stack_106_30065 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 16:15:37 2021
@author: em42363
"""
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
import os
os.chdir(os.path.dirname(__file__))
import sys
sys.path.insert(0, r'C:\Users\eduar\OneDrive\PhD\UTuning')
sys.path.insert(0, r'C:\Users\em42363\OneDrive\PhD\UTuning')
from UTuning import scorer, plots, UTuning
#df = pd.read_csv(r'C:\Users\eduar\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
df = pd.read_csv(r'C:\Users\em42363\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
# In[1]: Split train test
y = df['Production'].values
X = df[['Por', 'LogPerm', 'Brittle', 'TOC']].values
scaler = MinMaxScaler()
scaler.fit(X)
Xs = scaler.transform(X)
ys = (y - y.min())/ (y.max()-y.min())
X_train, X_test, y_train, y_test = train_test_split(Xs, ys, test_size=0.33)
#%%
n_estimators = np.arange(90, 200, step=10)
lr = np.arange(0.001, 0.2, step=.001)
param_grid = {
"learning_rate": list(lr),
"n_estimators": list(n_estimators)
}
model=CatBoostRegressor(loss_function='RMSEWithUncertainty',
verbose=False,
random_seed=0)
random_cv = UTuning.RandomizedSearch(model, param_grid, cv = 2, n_iter = 25)
random_cv.fit(X_train, y_train)
#%% surface
df = pd.DataFrame(random_cv.cv_results_)
labels = {'x': 'n estimators',
'y': 'Learning rate',
'z': 'Model goodness'}
x = np.array(df['param_n_estimators'], dtype = float)
y = np.array(df['param_learning_rate'], dtype = float)
z = np.array(df['split0_test_score'], dtype = float)
plots.surface(x,
y,
z,
10,
labels)
# %% Tuned model
model = CatBoostRegressor(iterations=random_cv.best_params_['n_estimators'],
learning_rate=random_cv.best_params_['learning_rate'],
loss_function='RMSEWithUncertainty',
verbose=False, random_seed=0)
model.fit(X_train, y_train)
estimates = model.predict(X_test)
#%%
n_quantiles = 11
perc = np.linspace(0.0, 1.00, n_quantiles)
Prediction = estimates[:, 0]
#Knowledge_u = np.sqrt(np.var(Prediction, axis=1)) # Knowledge uncertainty
#Data_u = np.sqrt(np.mean(ens_preds[:, :, 1], axis=1)) # Data uncertainty
#Sigma = Knowledge_u+Data_u
Sigma = np.sqrt(estimates[:,1])
score = scorer.scorer(Prediction, y_test, Sigma)
#%%
IF_array = score.IndicatorFunction()
plots.error_accuracy_plot(perc, IF_array, Prediction, y_test, Sigma)
print('Accuracy = {0:2.2f}'.format(score.Accuracy()))
print('Precision = {0:2.2f}'.format(score.Precision()))
print('Goodness = {0:2.2f}'.format(score.Goodness()))
|
the-stack_106_30068 | import asyncio
import json
import logging
import time
from collections import defaultdict
from decimal import Decimal
from operator import itemgetter
import websockets
logger = logging.getLogger('luno_streams')
class BackoffException(Exception):
pass
class Updater:
def __init__(self, pair_code, api_key, api_secret, hooks=None):
self.pair_code = pair_code.upper()
self.api_key = api_key
self.api_secret = api_secret
self.sequence = None
self.bids = {}
self.asks = {}
self.websocket = None
self.hooks = hooks or []
self.time_last_connection_attempt = None
self.url = f'wss://ws.luno.com/api/1/stream/{self.pair_code}'
def check_backoff(self):
if self.time_last_connection_attempt is not None:
delta = time.time() - self.time_last_connection_attempt
if delta < 10:
raise BackoffException()
async def connect(self):
if self.websocket is not None: # reconnecting
logger.info(f'[{self.pair_code}] Closing existing connection...')
await self.websocket.close()
try:
self.check_backoff()
except BackoffException:
# do not attempt connection more than once every 10 seconds
logger.info('Waiting 10 seconds before attempting to connect...')
await asyncio.sleep(10)
self.time_last_connection_attempt = time.time()
logger.info(f'[{self.pair_code}] Connecting to {self.url}...')
self.websocket = await websockets.connect(self.url)
# no error handling - if connection fails, let it raise websocket Exception
await self.websocket.send(json.dumps({
'api_key_id': self.api_key,
'api_key_secret': self.api_secret,
}))
initial = await self.websocket.recv()
initial_data = json.loads(initial)
self.sequence = int(initial_data['sequence'])
self.asks = {x['id']: [Decimal(x['price']), Decimal(x['volume'])] for x in initial_data['asks']}
self.bids = {x['id']: [Decimal(x['price']), Decimal(x['volume'])] for x in initial_data['bids']}
logger.info(f'[{self.pair_code}] Initial state received.')
async def run(self):
await self.connect()
async for message in self.websocket:
if message == '""':
# keep alive
continue
await self.handle_message(message)
async def handle_message(self, message):
data = json.loads(message)
new_sequence = int(data['sequence'])
if new_sequence != self.sequence + 1:
logger.info(
f'[{self.pair_code}] Sequence broken: expected "{self.sequence+1}", received "{new_sequence}".'
)
logger.info(f'[{self.pair_code}] Reconnecting...')
return await self.connect()
self.sequence = new_sequence
trades = self.process_message(data)
for fn in self.hooks:
args = [self.consolidated_order_book, trades]
if asyncio.iscoroutinefunction(fn):
await fn(*args)
else:
fn(*args)
def process_message(self, data):
if data['delete_update']:
order_id = data['delete_update']['order_id']
try:
del self.bids[order_id]
except KeyError:
pass
try:
del self.asks[order_id]
except KeyError:
pass
if data['create_update']:
update = data['create_update']
price = Decimal(update['price'])
volume = Decimal(update['volume'])
key = update['order_id']
book = self.bids if update['type'] == 'BID' else self.asks
book[key] = [price, volume]
trades = []
if data['trade_updates']:
for update in data['trade_updates']:
update['price'] = Decimal(update['counter']) / Decimal(update['base'])
maker_order_id = update['maker_order_id']
if maker_order_id in self.bids:
self.update_existing_order(key='bids', update=update)
trades.append({**update, 'type': 'sell'})
elif maker_order_id in self.asks:
self.update_existing_order(key='asks', update=update)
trades.append({**update, 'type': 'buy'})
return trades
def update_existing_order(self, key, update):
book = getattr(self, key)
order_id = update['maker_order_id']
existing_order = book[order_id]
existing_volume = existing_order[1]
new_volume = existing_volume - Decimal(update['base'])
if new_volume == Decimal('0'):
del book[order_id]
else:
existing_order[1] -= Decimal(update['base'])
@property
def consolidated_order_book(self):
#Building
#Luno_BTC_bid_Orderbook_List = [(k,v) for k,v in dict.items(self.bids) if v[0] == max(self.bids, key=lambda x: x[0])]
#Luno_BTC_bid_Orderbook_List = [(k,v) for k,v in dict.items(self.bids) if v[0] == max(Luno_BTC_bid_Orderbook_List, key=lambda x: x[1][0])]
Luno_BTC_bid_Orderbook_List = [(k,v) for k,v in dict.items(self.bids)]
Highest_Bid_Order = max(Luno_BTC_bid_Orderbook_List, key=lambda x: x[1][0])
Luno_BTC_bid_Orderbook_List_Top_Bids = [Luno_BTC_bid_Orderbook_List[i] for i in range(0,len(Luno_BTC_bid_Orderbook_List)) if Luno_BTC_bid_Orderbook_List[i][1][0]==Highest_Bid_Order[1][0]]
Luno_BTC_ask_Orderbook_List = [(k,v) for k,v in dict.items(self.asks)]
Lowest_Ask_Order = min(Luno_BTC_ask_Orderbook_List, key=lambda x: x[1][0])
Luno_BTC_ask_Orderbook_List_Top_Asks = [Luno_BTC_ask_Orderbook_List[i] for i in range(0,len(Luno_BTC_ask_Orderbook_List)) if Luno_BTC_ask_Orderbook_List[i][1][0]==Lowest_Ask_Order[1][0]]
return [
Luno_BTC_bid_Orderbook_List_Top_Bids,
Luno_BTC_ask_Orderbook_List_Top_Asks,
]
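# Illustrative usage (added sketch, not part of the original module): run a
# single updater with a hook that prints the top of the book and any trades.
# The pair code and API credentials below are placeholders.
#
#     import asyncio
#
#     def print_top_of_book(order_book, trades):
#         top_bids, top_asks = order_book
#         print('best bids:', top_bids, 'best asks:', top_asks, 'trades:', trades)
#
#     updater = Updater('XBTZAR', api_key='key_id', api_secret='key_secret',
#                       hooks=[print_top_of_book])
#     asyncio.run(updater.run())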
|
the-stack_106_30072 | import ctypes
from typing import (
Generic,
TypeVar,
Any,
Type,
get_type_hints,
Callable,
Iterator,
Union
)
from typing_extensions import ParamSpec
import inspect
from functools import wraps
from contextlib import suppress
import faulthandler
from io import UnsupportedOperation
from contextlib import suppress
import sys
with suppress(UnsupportedOperation): # in case its running in idle or something like that
faulthandler.enable()
__all__ = (
"dereference_address",
"Pointer",
"to_ptr",
"decay"
)
T = TypeVar("T")
A = TypeVar("A")
P = ParamSpec("P")
def dereference_address(address: int) -> Any:
"""Dereference an address. Will cause a segmentation fault if the address is invalid."""
return ctypes.cast(address, ctypes.py_object).value
class Pointer(Generic[T]):
"""Base class representing a pointer."""
def __init__(self, address: int, typ: Type[T]) -> None:
self._address = address
self._type = typ
@property
def address(self) -> int:
"""Address of the pointer."""
return self._address
@property
def type(self) -> Type[T]:
"""Type of the pointer."""
return self._type
def __repr__(self) -> str:
return f"<pointer to {self.type.__name__} object at {hex(self.address)}>"
def __str__(self) -> str:
return hex(self.address)
def dereference(self) -> T:
"""Dereference the pointer."""
return dereference_address(self.address)
def __iter__(self) -> Iterator[T]:
"""Dereference the pointer."""
return iter({self.dereference()})
def __invert__(self) -> T:
"""Dereference the pointer."""
return self.dereference()
def assign(self, new: "Pointer[T]") -> None:
"""Point to a different address."""
if new.type is not self.type:
raise ValueError("new pointer must be the same type")
self._address = new.address
def __rshift__(self, value: Union["Pointer[T]", T]):
"""Point to a different address."""
self.assign(value if isinstance(value, Pointer) else to_ptr(value))
return self
def move(self, data: "Pointer[T]") -> None:
"""Move data from another pointer to this pointer. Very dangerous, use with caution."""
if data.type is not self.type:
raise ValueError("pointer must be the same type")
bytes_a = (ctypes.c_ubyte * sys.getsizeof(~data)).from_address(data.address)
bytes_b = (ctypes.c_ubyte * sys.getsizeof(~self)).from_address(self.address)
ctypes.memmove(bytes_b, bytes_a, len(bytes_a))
def __lshift__(self, data: Union["Pointer[T]", T]):
"""Move data from another pointer to this pointer. Very dangerous, use with caution."""
self.move(data if isinstance(data, Pointer) else to_ptr(data))
return self
def to_ptr(val: T) -> Pointer[T]:
"""Convert a value to a pointer."""
return Pointer(id(val), type(val))
def decay(func: Callable[P, T]) -> Callable[..., T]:
"""Automatically convert values to pointers when called."""
@wraps(func)
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
hints = get_type_hints(func)
actual: dict = {}
params = inspect.signature(func).parameters
for index, key in enumerate(params):
if key in kwargs:
actual[key] = kwargs[key]
else:
with suppress(IndexError):
actual[params[key].name] = args[index]
for key, value in hints.items():
if (hasattr(value, "__origin__")) and (value.__origin__ is Pointer):
actual[key] = to_ptr(actual[key])
return func(**actual)
return inner
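# --- Illustrative usage sketch (an addition, not part of the original module). ---
# It exercises the public API defined above; the names `print_text`, `text` and `p`
# are arbitrary examples.
if __name__ == "__main__":
    @decay
    def print_text(ptr: Pointer[str]) -> None:
        # thanks to @decay, the plain string argument arrives here as a Pointer[str]
        print(~ptr)
    text = "hello world"
    p = to_ptr(text)
    assert (~p) is text  # dereferencing yields the original object
    print_text(text)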
|
the-stack_106_30073 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import backoff
from google.api_core.exceptions import DeadlineExceeded, GoogleAPICallError
from google.cloud.exceptions import NotFound
import pytest
import translate_v3_create_glossary
import translate_v3_delete_glossary
import translate_v3_list_glossary
PROJECT_ID = os.environ["GCLOUD_PROJECT"]
GLOSSARY_INPUT_URI = "gs://cloud-samples-data/translation/glossary_ja.csv"
@pytest.fixture(scope="session")
def glossary():
"""Get the ID of a glossary available to session (do not mutate/delete)."""
glossary_id = "must-start-with-letters-" + str(uuid.uuid1())
translate_v3_create_glossary.create_glossary(
PROJECT_ID, GLOSSARY_INPUT_URI, glossary_id
)
yield glossary_id
# clean up
@backoff.on_exception(
backoff.expo, (DeadlineExceeded, GoogleAPICallError), max_time=60
)
def delete_glossary():
try:
translate_v3_delete_glossary.delete_glossary(
PROJECT_ID, glossary_id)
except NotFound as e:
# Ignoring this case.
print("Got NotFound, detail: {}".format(str(e)))
delete_glossary()
def test_list_glossary(capsys, glossary):
translate_v3_list_glossary.list_glossaries(PROJECT_ID)
out, _ = capsys.readouterr()
assert glossary in out
assert "gs://cloud-samples-data/translation/glossary_ja.csv" in out
|
the-stack_106_30074 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, model_helper, brew
class CopyOpsTest(unittest.TestCase):
def tearDown(self):
# Reset workspace after each test
# Otherwise, the multi-GPU test will use previously created tensors,
# which may have been placed on the wrong device
workspace.ResetWorkspace()
def run_test_copy_gradient(self, device_opt):
model = model_helper.ModelHelper(name="copy_test")
with core.DeviceScope(device_opt):
x = model.net.AddExternalInputs("x")
y = model.Copy(x, "y")
loss = model.AveragedLoss(y, "loss")
gradient_map = model.AddGradientOperators([loss])
workspace.FeedBlob(x, np.random.rand(32).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
self.assertTrue(np.array_equal(
workspace.FetchBlob(x),
workspace.FetchBlob(y),
))
self.assertTrue(np.array_equal(
workspace.FetchBlob(gradient_map[x]),
workspace.FetchBlob(gradient_map[y]),
))
def test_copy_gradient_cpu(self):
self.run_test_copy_gradient(core.DeviceOption(caffe2_pb2.CPU, 0))
@unittest.skipIf(workspace.NumGpuDevices() < 1, "Need at least 1 GPU.")
def test_copy_gradient_gpu(self):
self.run_test_copy_gradient(core.DeviceOption(workspace.GpuDeviceType, 0))
@unittest.skipIf(workspace.NumGpuDevices() < 2, "Need at least 2 GPU.")
def test_copy_gradient_multiple_gpus(self):
model = model_helper.ModelHelper(name="copy_test")
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
x_cpu = model.net.AddExternalInputs("x_cpu")
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, 0)):
x_gpu_1 = model.CopyCPUToGPU(x_cpu, "x_gpu_1")
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, 1)):
x_gpu_2 = model.Copy(x_gpu_1, "x_gpu_2")
loss = model.AveragedLoss(x_gpu_2, "loss")
gradient_map = model.AddGradientOperators([loss])
workspace.FeedBlob("x_cpu", np.random.rand(32).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
self.assertTrue(np.array_equal(
workspace.FetchBlob("x_gpu_1"),
workspace.FetchBlob("x_gpu_2"),
))
self.assertTrue(np.array_equal(
workspace.FetchBlob(gradient_map["x_gpu_1"]),
workspace.FetchBlob(gradient_map["x_gpu_2"]),
))
def get_op_with_output(model, output_blob_name):
for op in model.net.Proto().op:
if len(op.output) == 1 and op.output[0] == output_blob_name:
return op
return None
self.assertEqual(
get_op_with_output(model, "x_gpu_2_grad").device_option,
core.DeviceOption(workspace.GpuDeviceType, 1),
)
self.assertEqual(
get_op_with_output(model, "x_cpu_grad").device_option,
core.DeviceOption(workspace.GpuDeviceType, 0),
)
@unittest.skipIf(workspace.NumGpuDevices() < 1, "Need at least 1 GPU.")
def test_cpu2gpu_gpu2cpu_sparse_gradients(self):
model = model_helper.ModelHelper(name="copy_test")
v = model.param_init_net.UniformFill([], ["v"], shape=[16, 4])
        indices = model.param_init_net.UniformFill([], ["indices"], shape=[16, 4])
cpu_opt = core.DeviceOption(caffe2_pb2.CPU, 0)
gpu_opt = core.DeviceOption(workspace.GpuDeviceType, 0)
with core.DeviceScope(gpu_opt):
vcpu = model.CopyGPUToCPU(v, "vcpu")
with core.DeviceScope(cpu_opt):
g = model.Gather([vcpu, indices], "g")
with core.DeviceScope(gpu_opt):
ggpu = model.CopyCPUToGPU(g, "ggpu")
f = brew.fc(model, ggpu, "out", dim_in=4, dim_out=6)
(softmax, loss) = model.SoftmaxWithLoss(
[f, "label"],
["softmax", "loss"],
)
gradient_map = model.AddGradientOperators([loss])
self.assertTrue("v" in gradient_map)
self.assertTrue(isinstance(gradient_map['v'], core.GradientSlice))
@unittest.skipIf(workspace.NumGpuDevices() < 1, "Need at least 1 GPU.")
def test_cpu2gpu_gpu2cpu_gradients(self):
model = model_helper.ModelHelper(name="copy_test")
batch = 32
cpu_opt = core.DeviceOption(caffe2_pb2.CPU, 0)
gpu_opt = core.DeviceOption(workspace.GpuDeviceType, 0)
with core.NameScope("cpu"):
with core.DeviceScope(cpu_opt):
x_cpu = brew.fc(model, 'data', 'x_cpu', 16, 8)
with core.NameScope("gpu_0"):
with core.DeviceScope(gpu_opt):
x_gpu = model.CopyCPUToGPU(x_cpu, "x_gpu")
pred_gpu = brew.fc(model, x_gpu, "pred_gpu", 8, 4)
pred_cpu = model.CopyGPUToCPU(pred_gpu, "pred_cpu")
with core.DeviceScope(cpu_opt):
with core.NameScope("cpu"):
(softmax, loss) = model.SoftmaxWithLoss(
[pred_cpu, "label"],
["softmax", "loss"],
)
gradient_map = model.AddGradientOperators([loss])
# Add param updates (for cpu and gpu)
init_net = model.param_init_net
with core.DeviceScope(cpu_opt):
with core.NameScope("cpu"):
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
LR = init_net.ConstantFill([], "LR", shape=[1], value=-2.0)
for param in model.GetParams():
model.WeightedSum(
[param, ONE, gradient_map[param], LR],
param,
)
with core.NameScope("gpu_0"):
with core.DeviceScope(gpu_opt):
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
LR = init_net.ConstantFill([], "LR", shape=[1], value=-2.0)
for param in model.GetParams():
model.WeightedSum(
[param, ONE, gradient_map[param], LR],
param,
)
with core.DeviceScope(cpu_opt):
workspace.FeedBlob(
'cpu/data',
np.random.rand(batch, 16).astype(np.float32),
)
workspace.FeedBlob(
'cpu/label',
np.random.randint(4, size=batch).astype(np.int32),
)
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
initial_params = {p: workspace.FetchBlob(p) for p in model.GetParams()}
workspace.RunNet(model.net.Proto().name)
updated_params = {p: workspace.FetchBlob(p) for p in model.GetParams()}
for p in model.GetParams():
g = gradient_map[p]
expected = initial_params[p] - 2.0 * workspace.FetchBlob(g)
actual = updated_params[p]
self.assertTrue(
np.array_equal(expected, updated_params[p]),
"Mismatch: {}: {}, {}".format(p, expected, actual),
)
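# Entry point added for convenience so the module can be run directly as a script;
# the original file may instead rely on an external test runner.
if __name__ == "__main__":
    unittest.main()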
|
the-stack_106_30075 | # pylint: disable=protected-access
"""Main module of kytos/mef_eline Kytos Network Application.
NApp to provision circuits from user request.
"""
from threading import Lock
from flask import jsonify, request
from werkzeug.exceptions import (BadRequest, Conflict, Forbidden,
MethodNotAllowed, NotFound,
UnsupportedMediaType)
from kytos.core import KytosNApp, log, rest
from kytos.core.events import KytosEvent
from kytos.core.helpers import listen_to
from kytos.core.interface import TAG, UNI
from kytos.core.link import Link
from napps.kytos.mef_eline import settings
from napps.kytos.mef_eline.exceptions import InvalidPath
from napps.kytos.mef_eline.models import EVC, DynamicPathManager, Path
from napps.kytos.mef_eline.scheduler import CircuitSchedule, Scheduler
from napps.kytos.mef_eline.storehouse import StoreHouse
from napps.kytos.mef_eline.utils import emit_event, load_spec, validate
# pylint: disable=too-many-public-methods
class Main(KytosNApp):
"""Main class of amlight/mef_eline NApp.
This class is the entry point for this napp.
"""
spec = load_spec()
def setup(self):
"""Replace the '__init__' method for the KytosNApp subclass.
The setup method is automatically called by the controller when your
application is loaded.
So, if you have any setup routine, insert it here.
"""
# object used to scheduler circuit events
self.sched = Scheduler()
# object to save and load circuits
self.storehouse = StoreHouse(self.controller)
        # set the controller that will manage the dynamic paths
DynamicPathManager.set_controller(self.controller)
# dictionary of EVCs created. It acts as a circuit buffer.
# Every create/update/delete must be synced to storehouse.
self.circuits = {}
self._lock = Lock()
self.execute_as_loop(settings.DEPLOY_EVCS_INTERVAL)
self.execution_rounds = 0
self.load_all_evcs()
def execute(self):
"""Execute once when the napp is running."""
if self._lock.locked():
return
log.debug("Starting consistency routine")
with self._lock:
self.execute_consistency()
log.debug("Finished consistency routine")
def execute_consistency(self):
"""Execute consistency routine."""
self.execution_rounds += 1
stored_circuits = self.storehouse.get_data().copy()
for circuit in tuple(self.circuits.values()):
stored_circuits.pop(circuit.id, None)
if (
circuit.is_enabled()
and not circuit.is_active()
and not circuit.lock.locked()
):
if circuit.check_traces():
log.info(f"{circuit} enabled but inactive - activating")
with circuit.lock:
circuit.activate()
circuit.sync()
else:
if self.execution_rounds > settings.WAIT_FOR_OLD_PATH:
log.info(f"{circuit} enabled but inactive - redeploy")
with circuit.lock:
circuit.deploy()
for circuit_id in stored_circuits:
log.info(f"EVC found in storehouse but unloaded {circuit_id}")
self._load_evc(stored_circuits[circuit_id])
def shutdown(self):
"""Execute when your napp is unloaded.
If you have some cleanup procedure, insert it here.
"""
@rest("/v2/evc/", methods=["GET"])
def list_circuits(self):
"""Endpoint to return circuits stored.
If archived is set to True return all circuits, else only the ones
not archived.
"""
log.debug("list_circuits /v2/evc")
archived = request.args.get("archived", False)
circuits = self.storehouse.get_data()
if not circuits:
return jsonify({}), 200
if archived:
return jsonify(circuits), 200
return (
jsonify(
{
circuit_id: circuit
for circuit_id, circuit in circuits.items()
if not circuit.get("archived", False)
}
),
200,
)
@rest("/v2/evc/<circuit_id>", methods=["GET"])
def get_circuit(self, circuit_id):
"""Endpoint to return a circuit based on id."""
log.debug("get_circuit /v2/evc/%s", circuit_id)
circuits = self.storehouse.get_data()
try:
result = circuits[circuit_id]
except KeyError:
result = f"circuit_id {circuit_id} not found"
log.debug("get_circuit result %s %s", result, 404)
            raise NotFound(result) from KeyError
status = 200
log.debug("get_circuit result %s %s", result, status)
return jsonify(result), status
@rest("/v2/evc/", methods=["POST"])
@validate(spec)
def create_circuit(self, data):
"""Try to create a new circuit.
Firstly, for EVPL: E-Line NApp verifies if UNI_A's requested C-VID and
UNI_Z's requested C-VID are available from the interfaces' pools. This
is checked when creating the UNI object.
Then, E-Line NApp requests a primary and a backup path to the
Pathfinder NApp using the attributes primary_links and backup_links
submitted via REST
# For each link composing paths in #3:
# - E-Line NApp requests a S-VID available from the link VLAN pool.
# - Using the S-VID obtained, generate abstract flow entries to be
# sent to FlowManager
Push abstract flow entries to FlowManager and FlowManager pushes
OpenFlow entries to datapaths
E-Line NApp generates an event to notify all Kytos NApps of a new EVC
creation
Finnaly, notify user of the status of its request.
"""
# Try to create the circuit object
log.debug("create_circuit /v2/evc/")
try:
evc = self._evc_from_dict(data)
except ValueError as exception:
log.debug("create_circuit result %s %s", exception, 400)
raise BadRequest(str(exception)) from BadRequest
if evc.primary_path:
try:
evc.primary_path.is_valid(
evc.uni_a.interface.switch,
evc.uni_z.interface.switch,
bool(evc.circuit_scheduler),
)
except InvalidPath as exception:
raise BadRequest(
f"primary_path is not valid: {exception}"
) from exception
if evc.backup_path:
try:
evc.backup_path.is_valid(
evc.uni_a.interface.switch,
evc.uni_z.interface.switch,
bool(evc.circuit_scheduler),
)
except InvalidPath as exception:
raise BadRequest(
f"backup_path is not valid: {exception}"
) from exception
# verify duplicated evc
if self._is_duplicated_evc(evc):
result = "The EVC already exists."
log.debug("create_circuit result %s %s", result, 409)
raise Conflict(result)
if (
not evc.primary_path
and evc.dynamic_backup_path is False
and evc.uni_a.interface.switch != evc.uni_z.interface.switch
):
result = "The EVC must have a primary path or allow dynamic paths."
log.debug("create_circuit result %s %s", result, 400)
raise BadRequest(result)
# store circuit in dictionary
self.circuits[evc.id] = evc
# save circuit
self.storehouse.save_evc(evc)
# Schedule the circuit deploy
self.sched.add(evc)
# Circuit has no schedule, deploy now
if not evc.circuit_scheduler:
with evc.lock:
evc.deploy()
# Notify users
event = KytosEvent(
name="kytos.mef_eline.created", content=evc.as_dict()
)
self.controller.buffers.app.put(event)
result = {"circuit_id": evc.id}
status = 201
log.debug("create_circuit result %s %s", result, status)
emit_event(self.controller, "created", evc_id=evc.id)
return jsonify(result), status
@rest("/v2/evc/<circuit_id>", methods=["PATCH"])
def update(self, circuit_id):
"""Update a circuit based on payload.
The EVC required attributes (name, uni_a, uni_z) can't be updated.
"""
log.debug("update /v2/evc/%s", circuit_id)
try:
evc = self.circuits[circuit_id]
except KeyError:
result = f"circuit_id {circuit_id} not found"
log.debug("update result %s %s", result, 404)
raise NotFound(result) from NotFound
if evc.archived:
result = "Can't update archived EVC"
log.debug("update result %s %s", result, 405)
raise MethodNotAllowed(["GET"], result)
try:
data = request.get_json()
except BadRequest:
result = "The request body is not a well-formed JSON."
log.debug("update result %s %s", result, 400)
raise BadRequest(result) from BadRequest
if data is None:
result = "The request body mimetype is not application/json."
log.debug("update result %s %s", result, 415)
raise UnsupportedMediaType(result) from UnsupportedMediaType
try:
enable, redeploy = evc.update(
**self._evc_dict_with_instances(data)
)
except ValueError as exception:
log.error(exception)
log.debug("update result %s %s", exception, 400)
raise BadRequest(str(exception)) from BadRequest
if evc.is_active():
if enable is False: # disable if active
with evc.lock:
evc.remove()
elif redeploy is not None: # redeploy if active
with evc.lock:
evc.remove()
evc.deploy()
else:
if enable is True: # enable if inactive
with evc.lock:
evc.deploy()
result = {evc.id: evc.as_dict()}
status = 200
log.debug("update result %s %s", result, status)
emit_event(self.controller, "updated", evc_id=evc.id, data=data)
return jsonify(result), status
@rest("/v2/evc/<circuit_id>", methods=["DELETE"])
def delete_circuit(self, circuit_id):
"""Remove a circuit.
First, the flows are removed from the switches, and then the EVC is
disabled.
"""
log.debug("delete_circuit /v2/evc/%s", circuit_id)
try:
evc = self.circuits[circuit_id]
except KeyError:
result = f"circuit_id {circuit_id} not found"
log.debug("delete_circuit result %s %s", result, 404)
raise NotFound(result) from NotFound
if evc.archived:
result = f"Circuit {circuit_id} already removed"
log.debug("delete_circuit result %s %s", result, 404)
raise NotFound(result) from NotFound
log.info("Removing %s", evc)
evc.remove_current_flows()
evc.deactivate()
evc.disable()
self.sched.remove(evc)
evc.archive()
evc.sync()
log.info("EVC removed. %s", evc)
result = {"response": f"Circuit {circuit_id} removed"}
status = 200
log.debug("delete_circuit result %s %s", result, status)
emit_event(self.controller, "deleted", evc_id=evc.id)
return jsonify(result), status
@rest("v2/evc/<circuit_id>/metadata", methods=["GET"])
def get_metadata(self, circuit_id):
"""Get metadata from an EVC."""
try:
return (
jsonify({"metadata": self.circuits[circuit_id].metadata}),
200,
)
except KeyError as error:
raise NotFound(f"circuit_id {circuit_id} not found.") from error
@rest("v2/evc/<circuit_id>/metadata", methods=["POST"])
def add_metadata(self, circuit_id):
"""Add metadata to an EVC."""
try:
metadata = request.get_json()
content_type = request.content_type
except BadRequest as error:
result = "The request body is not a well-formed JSON."
raise BadRequest(result) from error
if content_type is None:
result = "The request body is empty."
raise BadRequest(result)
if metadata is None:
if content_type != "application/json":
result = (
"The content type must be application/json "
f"(received {content_type})."
)
else:
result = "Metadata is empty."
raise UnsupportedMediaType(result)
try:
evc = self.circuits[circuit_id]
except KeyError as error:
raise NotFound(f"circuit_id {circuit_id} not found.") from error
evc.extend_metadata(metadata)
evc.sync()
return jsonify("Operation successful"), 201
@rest("v2/evc/<circuit_id>/metadata/<key>", methods=["DELETE"])
def delete_metadata(self, circuit_id, key):
"""Delete metadata from an EVC."""
try:
evc = self.circuits[circuit_id]
except KeyError as error:
raise NotFound(f"circuit_id {circuit_id} not found.") from error
evc.remove_metadata(key)
evc.sync()
return jsonify("Operation successful"), 200
@rest("/v2/evc/<circuit_id>/redeploy", methods=["PATCH"])
def redeploy(self, circuit_id):
"""Endpoint to force the redeployment of an EVC."""
log.debug("redeploy /v2/evc/%s/redeploy", circuit_id)
try:
evc = self.circuits[circuit_id]
except KeyError:
result = f"circuit_id {circuit_id} not found"
raise NotFound(result) from NotFound
if evc.is_enabled():
with evc.lock:
evc.remove_current_flows()
evc.deploy()
result = {"response": f"Circuit {circuit_id} redeploy received."}
status = 202
else:
result = {"response": f"Circuit {circuit_id} is disabled."}
status = 409
return jsonify(result), status
@rest("/v2/evc/schedule", methods=["GET"])
def list_schedules(self):
"""Endpoint to return all schedules stored for all circuits.
Return a JSON with the following template:
[{"schedule_id": <schedule_id>,
"circuit_id": <circuit_id>,
"schedule": <schedule object>}]
"""
log.debug("list_schedules /v2/evc/schedule")
circuits = self.storehouse.get_data().values()
if not circuits:
result = {}
status = 200
return jsonify(result), status
result = []
status = 200
for circuit in circuits:
circuit_scheduler = circuit.get("circuit_scheduler")
if circuit_scheduler:
for scheduler in circuit_scheduler:
value = {
"schedule_id": scheduler.get("id"),
"circuit_id": circuit.get("id"),
"schedule": scheduler,
}
result.append(value)
log.debug("list_schedules result %s %s", result, status)
return jsonify(result), status
@rest("/v2/evc/schedule/", methods=["POST"])
def create_schedule(self):
"""
Create a new schedule for a given circuit.
        This service does not check for conflicts with other schedules.
Payload example:
{
"circuit_id":"aa:bb:cc",
"schedule": {
"date": "2019-08-07T14:52:10.967Z",
"interval": "string",
"frequency": "1 * * * *",
"action": "create"
}
}
"""
log.debug("create_schedule /v2/evc/schedule/")
json_data = self._json_from_request("create_schedule")
try:
circuit_id = json_data["circuit_id"]
except TypeError:
result = "The payload should have a dictionary."
log.debug("create_schedule result %s %s", result, 400)
raise BadRequest(result) from BadRequest
except KeyError:
result = "Missing circuit_id."
log.debug("create_schedule result %s %s", result, 400)
raise BadRequest(result) from BadRequest
try:
schedule_data = json_data["schedule"]
except KeyError:
result = "Missing schedule data."
log.debug("create_schedule result %s %s", result, 400)
raise BadRequest(result) from BadRequest
# Get EVC from circuits buffer
circuits = self._get_circuits_buffer()
# get the circuit
evc = circuits.get(circuit_id)
        # verify that the circuit exists
if not evc:
result = f"circuit_id {circuit_id} not found"
log.debug("create_schedule result %s %s", result, 404)
raise NotFound(result) from NotFound
# Can not modify circuits deleted and archived
if evc.archived:
result = f"Circuit {circuit_id} is archived. Update is forbidden."
log.debug("create_schedule result %s %s", result, 403)
raise Forbidden(result) from Forbidden
# new schedule from dict
new_schedule = CircuitSchedule.from_dict(schedule_data)
# If there is no schedule, create the list
if not evc.circuit_scheduler:
evc.circuit_scheduler = []
# Add the new schedule
evc.circuit_scheduler.append(new_schedule)
# Add schedule job
self.sched.add_circuit_job(evc, new_schedule)
# save circuit to storehouse
evc.sync()
result = new_schedule.as_dict()
status = 201
log.debug("create_schedule result %s %s", result, status)
return jsonify(result), status
@rest("/v2/evc/schedule/<schedule_id>", methods=["PATCH"])
def update_schedule(self, schedule_id):
"""Update a schedule.
        Change all attributes of the given schedule of an EVC circuit.
The schedule ID is preserved as default.
Payload example:
{
"date": "2019-08-07T14:52:10.967Z",
"interval": "string",
"frequency": "1 * * *",
"action": "create"
}
"""
log.debug("update_schedule /v2/evc/schedule/%s", schedule_id)
# Try to find a circuit schedule
evc, found_schedule = self._find_evc_by_schedule_id(schedule_id)
# Can not modify circuits deleted and archived
if not found_schedule:
result = f"schedule_id {schedule_id} not found"
log.debug("update_schedule result %s %s", result, 404)
raise NotFound(result) from NotFound
if evc.archived:
result = f"Circuit {evc.id} is archived. Update is forbidden."
log.debug("update_schedule result %s %s", result, 403)
raise Forbidden(result) from Forbidden
data = self._json_from_request("update_schedule")
new_schedule = CircuitSchedule.from_dict(data)
new_schedule.id = found_schedule.id
# Remove the old schedule
evc.circuit_scheduler.remove(found_schedule)
# Append the modified schedule
evc.circuit_scheduler.append(new_schedule)
# Cancel all schedule jobs
self.sched.cancel_job(found_schedule.id)
# Add the new circuit schedule
self.sched.add_circuit_job(evc, new_schedule)
# Save EVC to the storehouse
evc.sync()
result = new_schedule.as_dict()
status = 200
log.debug("update_schedule result %s %s", result, status)
return jsonify(result), status
@rest("/v2/evc/schedule/<schedule_id>", methods=["DELETE"])
def delete_schedule(self, schedule_id):
"""Remove a circuit schedule.
Remove the Schedule from EVC.
Remove the Schedule from cron job.
Save the EVC to the Storehouse.
"""
log.debug("delete_schedule /v2/evc/schedule/%s", schedule_id)
evc, found_schedule = self._find_evc_by_schedule_id(schedule_id)
# Can not modify circuits deleted and archived
if not found_schedule:
result = f"schedule_id {schedule_id} not found"
log.debug("delete_schedule result %s %s", result, 404)
raise NotFound(result)
if evc.archived:
result = f"Circuit {evc.id} is archived. Update is forbidden."
log.debug("delete_schedule result %s %s", result, 403)
raise Forbidden(result)
# Remove the old schedule
evc.circuit_scheduler.remove(found_schedule)
# Cancel all schedule jobs
self.sched.cancel_job(found_schedule.id)
# Save EVC to the storehouse
evc.sync()
result = "Schedule removed"
status = 200
log.debug("delete_schedule result %s %s", result, status)
return jsonify(result), status
def _is_duplicated_evc(self, evc):
"""Verify if the circuit given is duplicated with the stored evcs.
Args:
evc (EVC): circuit to be analysed.
Returns:
boolean: True if the circuit is duplicated, otherwise False.
"""
for circuit in tuple(self.circuits.values()):
if not circuit.archived and circuit.shares_uni(evc):
return True
return False
@listen_to("kytos/topology.link_up")
def on_link_up(self, event):
"""Change circuit when link is up or end_maintenance."""
self.handle_link_up(event)
def handle_link_up(self, event):
"""Change circuit when link is up or end_maintenance."""
log.debug("Event handle_link_up %s", event)
for evc in self.circuits.values():
if evc.is_enabled() and not evc.archived:
with evc.lock:
evc.handle_link_up(event.content["link"])
@listen_to("kytos/topology.link_down")
def on_link_down(self, event):
"""Change circuit when link is down or under_mantenance."""
self.handle_link_down(event)
def handle_link_down(self, event):
"""Change circuit when link is down or under_mantenance."""
log.debug("Event handle_link_down %s", event)
for evc in self.circuits.values():
with evc.lock:
if evc.is_affected_by_link(event.content["link"]):
log.debug(f"Handling evc {evc.id} on link down")
if evc.handle_link_down():
emit_event(
self.controller,
"redeployed_link_down",
evc_id=evc.id,
)
else:
emit_event(
self.controller,
"error_redeploy_link_down",
evc_id=evc.id,
)
def load_all_evcs(self):
"""Try to load all EVCs on startup."""
for circuit_id, circuit in self.storehouse.get_data().items():
if circuit_id not in self.circuits:
self._load_evc(circuit)
def _load_evc(self, circuit_dict):
"""Load one EVC from storehouse to memory."""
try:
evc = self._evc_from_dict(circuit_dict)
except ValueError as exception:
log.error(
f'Could not load EVC {circuit_dict["id"]} '
f"because {exception}"
)
return None
if evc.archived:
return None
evc.deactivate()
evc.sync()
self.circuits.setdefault(evc.id, evc)
self.sched.add(evc)
return evc
@listen_to("kytos/flow_manager.flow.error")
def on_flow_mod_error(self, event):
"""Handle flow mod errors related to an EVC."""
self.handle_flow_mod_error(event)
def handle_flow_mod_error(self, event):
"""Handle flow mod errors related to an EVC."""
flow = event.content["flow"]
command = event.content.get("error_command")
if command != "add":
return
evc = self.circuits.get(EVC.get_id_from_cookie(flow.cookie))
if evc:
evc.remove_current_flows()
def _evc_dict_with_instances(self, evc_dict):
"""Convert some dict values to instance of EVC classes.
This method will convert: [UNI, Link]
"""
data = evc_dict.copy() # Do not modify the original dict
for attribute, value in data.items():
# Get multiple attributes.
# Ex: uni_a, uni_z
if "uni" in attribute:
try:
data[attribute] = self._uni_from_dict(value)
except ValueError:
result = "Error creating UNI: Invalid value"
raise BadRequest(result) from BadRequest
if attribute == "circuit_scheduler":
data[attribute] = []
for schedule in value:
data[attribute].append(CircuitSchedule.from_dict(schedule))
# Get multiple attributes.
# Ex: primary_links,
# backup_links,
# current_links_cache,
# primary_links_cache,
# backup_links_cache
if "links" in attribute:
data[attribute] = [
self._link_from_dict(link) for link in value
]
# Ex: current_path,
# primary_path,
# backup_path
if "path" in attribute and attribute != "dynamic_backup_path":
data[attribute] = Path(
[self._link_from_dict(link) for link in value]
)
return data
def _evc_from_dict(self, evc_dict):
data = self._evc_dict_with_instances(evc_dict)
return EVC(self.controller, **data)
def _uni_from_dict(self, uni_dict):
"""Return a UNI object from python dict."""
if uni_dict is None:
return False
interface_id = uni_dict.get("interface_id")
interface = self.controller.get_interface_by_id(interface_id)
if interface is None:
result = (
"Error creating UNI:"
+ f"Could not instantiate interface {interface_id}"
)
raise ValueError(result) from ValueError
tag_dict = uni_dict.get("tag", None)
if tag_dict:
tag = TAG.from_dict(tag_dict)
else:
tag = None
uni = UNI(interface, tag)
return uni
def _link_from_dict(self, link_dict):
"""Return a Link object from python dict."""
id_a = link_dict.get("endpoint_a").get("id")
id_b = link_dict.get("endpoint_b").get("id")
endpoint_a = self.controller.get_interface_by_id(id_a)
endpoint_b = self.controller.get_interface_by_id(id_b)
link = Link(endpoint_a, endpoint_b)
if "metadata" in link_dict:
link.extend_metadata(link_dict.get("metadata"))
s_vlan = link.get_metadata("s_vlan")
if s_vlan:
tag = TAG.from_dict(s_vlan)
if tag is False:
error_msg = f"Could not instantiate tag from dict {s_vlan}"
raise ValueError(error_msg)
link.update_metadata("s_vlan", tag)
return link
def _find_evc_by_schedule_id(self, schedule_id):
"""
Find an EVC and CircuitSchedule based on schedule_id.
:param schedule_id: Schedule ID
:return: EVC and Schedule
"""
circuits = self._get_circuits_buffer()
found_schedule = None
evc = None
# pylint: disable=unused-variable
for c_id, circuit in circuits.items():
for schedule in circuit.circuit_scheduler:
if schedule.id == schedule_id:
found_schedule = schedule
evc = circuit
break
if found_schedule:
break
return evc, found_schedule
def _get_circuits_buffer(self):
"""
Return the circuit buffer.
If the buffer is empty, try to load data from storehouse.
"""
if not self.circuits:
# Load storehouse circuits to buffer
circuits = self.storehouse.get_data()
for c_id, circuit in circuits.items():
evc = self._evc_from_dict(circuit)
self.circuits[c_id] = evc
return self.circuits
@staticmethod
def _json_from_request(caller):
"""Return a json from request.
        If it was not possible to get a JSON from the request, log the caller
        and the error that occurred (for debugging), and raise an exception.
"""
try:
json_data = request.get_json()
except ValueError as exception:
log.error(exception)
log.debug(f"{caller} result {exception} 400")
raise BadRequest(str(exception)) from BadRequest
except BadRequest:
result = "The request is not a valid JSON."
log.debug(f"{caller} result {result} 400")
raise BadRequest(result) from BadRequest
if json_data is None:
result = "Content-Type must be application/json"
log.debug(f"{caller} result {result} 415")
raise UnsupportedMediaType(result)
return json_data
|
the-stack_106_30076 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' from wikipedia: dx/dt = sigma*(y-x) ; dy/dt = x*(rho-z)-y ; dz/dt = x*y-beta*z '''
import numpy as np
import copy
def initialize(self,runInfoDict,inputFiles):
print('Life is beautiful my friends. Do not waste it!')
self.max_time = 0.03
self.t_step = 0.01
self.numberTimeSteps = int(self.max_time/self.t_step)
self.x = np.zeros(self.numberTimeSteps)
self.y = np.zeros(self.numberTimeSteps)
self.z = np.zeros(self.numberTimeSteps)
self.time = np.zeros(self.numberTimeSteps)-self.t_step
self.cnt = 0.0
return
def createNewInput(self, myInput, samplerType, **Kwargs):
  return Kwargs['SampledVars']
def run(self,Input):
self.cnt = 1.0
self.x0 = 1.0
self.y0 = 1.0
self.z0 = 1.0
self.x01 = copy.deepcopy(self.cnt+Input['x0'])
self.x02 = copy.deepcopy(self.cnt+Input['x0'])
self.z01 = copy.deepcopy(self.cnt+Input['x0'])
self.z02 = 101.0 - copy.deepcopy(self.cnt+Input['x0'])
self.y01 = copy.deepcopy(Input['x0'])
self.y02 = copy.deepcopy(Input['y0'])
self.time[0]= -self.t_step*5.0
self.x[0] = copy.deepcopy(self.cnt+Input['x0'])
self.y[0] = copy.deepcopy(self.cnt+Input['y0'])
self.z[0] = copy.deepcopy(self.cnt+Input['z0'])
for t in range ( self.numberTimeSteps-1):
self.time[t+1] = self.time[t] + self.t_step*5.0
self.x[t+1] = self.x[t] + (self.y[t]-self.x[t])*self.t_step
self.y[t+1] = self.y[t] + (self.x[t]*self.z[t]-self.y[t])*self.t_step
self.z[t+1] = self.z[t] + (self.x[t]*self.y[t]-self.z[t])*self.t_step
|
the-stack_106_30077 | from collections import deque
from dataset import *
import redis
import sys
import pyarrow as pa
import numpy as np
import pandas as pd
import time
import ray
import gc
import pickle
#ray.init(ignore_reinit_error=True) # do this locally
ray.init("auto", ignore_reinit_error=True, runtime_env={"working_dir":"/home/ubuntu/quokka","excludes":["*.csv","*.tbl","*.parquet"]})
#ray.timeline("profile.json")
@ray.remote
class Dataset:
def __init__(self, num_channels) -> None:
self.num_channels = num_channels
self.objects = {i: [] for i in range(self.num_channels)}
self.metadata = {}
self.remaining_channels = {i for i in range(self.num_channels)}
self.done = False
# only one person will ever call this, and that is the master node
# def change_num_channels(self, num_channels):
# if num_channels > self.num_channels:
# for i in range(self.num_channels, num_channels):
# self.objects[i] = {}
# self.num_channels = num_channels
def added_object(self, channel, object_handle):
if channel not in self.objects or channel not in self.remaining_channels:
raise Exception
self.objects[channel].append(object_handle)
def add_metadata(self, channel, object_handle):
if channel in self.metadata or channel not in self.remaining_channels:
raise Exception("Cannot add metadata for the same channel twice")
self.metadata[channel] = object_handle
def done_channel(self, channel):
self.remaining_channels.remove(channel)
if len(self.remaining_channels) == 0:
self.done = True
def is_complete(self):
return self.done
# debugging method
def print_all(self):
for channel in self.objects:
for object in self.objects[channel]:
r = redis.Redis(host=object[0], port=6800, db=0)
print(pickle.loads(r.get(object[1])))
def get_objects(self):
assert self.is_complete()
return self.objects
def to_pandas(self):
assert self.is_complete()
dfs = []
for channel in self.objects:
for object in self.objects[channel]:
r = redis.Redis(host=object[0], port=6800, db=0)
dfs.append(pickle.loads(r.get(object[1])))
return pd.concat(dfs)
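# Illustrative consumption pattern (names are examples only): a blocking task node
# returns a handle to one of these Dataset actors, and once every channel has called
# done_channel() the driver can collect the result, e.g.
#   df = ray.get(output_dataset.to_pandas.remote())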
class TaskNode:
# parallelism is going to be a dict of channel_id : ip
def __init__(self, id, channel_to_ip):
self.id = id
self.channel_to_ip = channel_to_ip
self.targets = {}
self.r = redis.Redis(host='localhost', port=6800, db=0)
self.target_rs = {}
self.target_ps = {}
# track the targets that are still alive
self.alive_targets = {}
# you are only allowed to send a message to a done target once. More than once is unforgivable. This is because the current mechanism
# checks if something is done, and then sends a message. The target can be done while the message is sending. But then come next message,
# the check must tell you that the target is done.
self.strikes = set()
def append_to_targets(self,tup):
node_id, channel_to_ip, partition_key = tup
unique_ips = set(channel_to_ip.values())
redis_clients = {i: redis.Redis(host=i, port=6800, db=0) if i != ray.util.get_node_ip_address() else redis.Redis(host='localhost', port = 6800, db=0) for i in unique_ips}
self.targets[node_id] = (channel_to_ip, partition_key)
self.target_rs[node_id] = {}
self.target_ps[node_id] = []
for channel in channel_to_ip:
self.target_rs[node_id][channel] = redis_clients[channel_to_ip[channel]]
for client in redis_clients:
pubsub = redis_clients[client].pubsub(ignore_subscribe_messages = True)
pubsub.subscribe("node-done-"+str(node_id))
self.target_ps[node_id].append(pubsub)
self.alive_targets[node_id] = {i for i in channel_to_ip}
for i in channel_to_ip:
self.strikes.add((node_id,i))
def initialize(self):
# child classes must override this method
raise NotImplementedError
def execute(self):
# child classes must override this method
raise NotImplementedError
# determines if there are still targets alive, returns True or False.
def update_targets(self):
for target_node in self.target_ps:
# there are #-ip locations you need to poll here.
for client in self.target_ps[target_node]:
while True:
message = client.get_message()
if message is not None:
print(message['data'])
self.alive_targets[target_node].remove(int(message['data']))
if len(self.alive_targets[target_node]) == 0:
self.alive_targets.pop(target_node)
else:
break
if len(self.alive_targets) > 0:
return True
else:
return False
def get_batches(self, mailbox, mailbox_id, p, my_id):
while True:
message = p.get_message()
if message is None:
break
if message['channel'].decode('utf-8') == "mailbox-" + str(self.id) + "-" + str(my_id):
mailbox.append(message['data'])
elif message['channel'].decode('utf-8') == "mailbox-id-" + str(self.id) + "-" + str(my_id):
mailbox_id.append(int(message['data']))
my_batches = {}
while len(mailbox) > 0 and len(mailbox_id) > 0:
first = mailbox.popleft()
stream_id = mailbox_id.popleft()
if len(first) < 10 and first.decode("utf-8") == "done":
# the responsibility for checking how many executors this input stream has is now resting on the consumer.
self.source_parallelism[stream_id] -= 1
if self.source_parallelism[stream_id] == 0:
self.input_streams.pop(self.physical_to_logical_stream_mapping[stream_id])
print("done", self.physical_to_logical_stream_mapping[stream_id])
else:
if stream_id in my_batches:
my_batches[stream_id].append(pickle.loads(first))
else:
my_batches[stream_id] = [pickle.loads(first)]
return my_batches
def push(self, data):
print("stream psuh start",time.time())
if not self.update_targets():
print("stream psuh end",time.time())
return False
if type(data) == pd.core.frame.DataFrame:
for target in self.alive_targets:
original_channel_to_ip, partition_key = self.targets[target]
for channel in self.alive_targets[target]:
if partition_key is not None:
if type(partition_key) == str:
payload = data[data[partition_key] % len(original_channel_to_ip) == channel]
print("payload size ",payload.memory_usage().sum(), channel)
elif callable(partition_key):
payload = partition_key(data, channel)
else:
raise Exception("Can't understand partition strategy")
else:
payload = data
# don't worry about target being full for now.
print("not checking if target is full. This will break with larger joins for sure.")
pipeline = self.target_rs[target][channel].pipeline()
#pipeline.publish("mailbox-"+str(target) + "-" + str(channel),context.serialize(payload).to_buffer().to_pybytes())
pipeline.publish("mailbox-"+str(target) + "-" + str(channel),pickle.dumps(payload))
pipeline.publish("mailbox-id-"+str(target) + "-" + str(channel),self.id)
results = pipeline.execute()
if False in results:
if (target, channel) not in self.strikes:
raise Exception
self.strikes.remove((target, channel))
else:
raise Exception
print("stream psuh end",time.time())
return True
def done(self):
if not self.update_targets():
return False
for target in self.alive_targets:
for channel in self.alive_targets[target]:
pipeline = self.target_rs[target][channel].pipeline()
pipeline.publish("mailbox-"+str(target) + "-" + str(channel),"done")
pipeline.publish("mailbox-id-"+str(target) + "-" + str(channel),self.id)
results = pipeline.execute()
if False in results:
if (target, channel) not in self.strikes:
print(target,channel)
raise Exception
self.strikes.remove((target, channel))
return True
@ray.remote
class NonBlockingTaskNode(TaskNode):
# this is for one of the parallel threads
def __init__(self, streams, datasets, functionObject, id, channel_to_ip, mapping, source_parallelism, ip) -> None:
super().__init__(id, channel_to_ip)
self.functionObject = functionObject
self.input_streams = streams
self.datasets = datasets
# this maps what the system's stream_id is to what the user called the stream when they created the task node
self.physical_to_logical_stream_mapping = mapping
self.source_parallelism = source_parallelism
self.ip = ip
def initialize(self, my_id):
if self.datasets is not None:
self.functionObject.initialize(self.datasets, my_id)
def execute(self, my_id):
print("task start",time.time())
p = self.r.pubsub(ignore_subscribe_messages=True)
p.subscribe("mailbox-" + str(self.id) + "-" + str(my_id), "mailbox-id-" + str(self.id) + "-" + str(my_id))
assert my_id in self.channel_to_ip
mailbox = deque()
mailbox_id = deque()
while len(self.input_streams) > 0:
my_batches = self.get_batches(mailbox, mailbox_id, p, my_id)
for stream_id in my_batches:
results = self.functionObject.execute(my_batches[stream_id], self.physical_to_logical_stream_mapping[stream_id], my_id)
if hasattr(self.functionObject, 'early_termination') and self.functionObject.early_termination:
break
                # Subtle point: we only break out here if len(self.targets) > 0, i.e. this node
                # originally had downstream targets but none of them are alive anymore.
if results is not None and len(self.targets) > 0:
break_out = False
assert type(results) == list
for result in results:
if self.push(result) is False:
break_out = True
break
if break_out:
break
else:
pass
obj_done = self.functionObject.done(my_id)
del self.functionObject
gc.collect()
if obj_done is not None:
self.push(obj_done)
self.done()
self.r.publish("node-done-"+str(self.id),str(my_id))
print("task end",time.time())
@ray.remote
class BlockingTaskNode(TaskNode):
# this is for one of the parallel threads
def __init__(self, streams, datasets, output_dataset, functionObject, id, channel_to_ip, mapping, source_parallelism, ip) -> None:
super().__init__(id, channel_to_ip)
self.functionObject = functionObject
self.input_streams = streams
self.datasets = datasets
self.output_dataset = output_dataset
# this maps what the system's stream_id is to what the user called the stream when they created the task node
self.physical_to_logical_stream_mapping = mapping
self.source_parallelism = source_parallelism
self.ip = ip
def initialize(self, my_id):
if self.datasets is not None:
self.functionObject.initialize(self.datasets, my_id)
# explicit override with error. Makes no sense to append to targets for a blocking node. Need to use the dataset instead.
def append_to_targets(self,tup):
raise Exception("Trying to stream from a blocking node")
def execute(self, my_id):
# this needs to change
print("task start",time.time())
p = self.r.pubsub(ignore_subscribe_messages=True)
p.subscribe("mailbox-" + str(self.id) + "-" + str(my_id), "mailbox-id-" + str(self.id) + "-" + str(my_id))
assert my_id in self.channel_to_ip
mailbox = deque()
mailbox_id = deque()
self.object_count = 0
while len(self.input_streams) > 0:
my_batches = self.get_batches( mailbox, mailbox_id, p, my_id)
for stream_id in my_batches:
results = self.functionObject.execute(my_batches[stream_id], self.physical_to_logical_stream_mapping[stream_id], my_id)
if hasattr(self.functionObject, 'early_termination') and self.functionObject.early_termination:
break
if results is not None and len(results) > 0:
assert type(results) == list
for result in results:
key = str(self.id) + "-" + str(my_id) + "-" + str(self.object_count)
self.object_count += 1
self.r.set(key, pickle.dumps(result))
self.output_dataset.added_object.remote(my_id, (ray.util.get_node_ip_address(), key, sys.getsizeof(result)))
else:
pass
obj_done = self.functionObject.done(my_id)
del self.functionObject
gc.collect()
if obj_done is not None:
key = str(self.id) + "-" + str(my_id) + "-" + str(self.object_count)
self.object_count += 1
self.r.set(key, pickle.dumps(obj_done))
self.output_dataset.added_object.remote(my_id, (ray.util.get_node_ip_address(), key, sys.getsizeof(obj_done)))
self.output_dataset.done_channel.remote(my_id)
self.done()
self.r.publish("node-done-"+str(self.id),str(my_id))
print("task end",time.time())
class InputNode(TaskNode):
def __init__(self, id, channel_to_ip, dependent_map = {}):
super().__init__(id, channel_to_ip)
self.dependent_rs = {}
self.dependent_parallelism = {}
for key in dependent_map:
self.dependent_parallelism[key] = dependent_map[key][1]
r = redis.Redis(host=dependent_map[key][0], port=6800, db=0)
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe("input-done-" + str(key))
self.dependent_rs[key] = p
def execute(self, id):
undone_dependencies = len(self.dependent_rs)
while undone_dependencies > 0:
time.sleep(0.001) # be nice
for dependent_node in self.dependent_rs:
message = self.dependent_rs[dependent_node].get_message()
if message is not None:
if message['data'].decode("utf-8") == "done":
self.dependent_parallelism[dependent_node] -= 1
if self.dependent_parallelism[dependent_node] == 0:
undone_dependencies -= 1
else:
raise Exception(message['data'])
print("input node start",time.time())
input_generator = self.accessor.get_next_batch(id)
for batch in input_generator:
if self.batch_func is not None:
print("batch func start",time.time())
result = self.batch_func(batch)
print("batch func end",time.time())
self.push(result)
else:
self.push(batch)
self.done()
self.r.publish("input-done-" + str(self.id), "done")
print("input node end",time.time())
@ray.remote
class InputS3CSVNode(InputNode):
def __init__(self,id, bucket, key, names, channel_to_ip, batch_func=None, sep = ",", stride = 64 * 1024 * 1024, dependent_map = {}):
super().__init__(id, channel_to_ip, dependent_map)
self.bucket = bucket
self.key = key
self.names = names
self.batch_func = batch_func
self.sep = sep
self.stride = stride
def initialize(self, my_id):
if self.bucket is None:
raise Exception
self.accessor = InputCSVDataset(self.bucket, self.key, self.names,0, sep = self.sep, stride = self.stride)
self.accessor.set_num_mappers(len(self.channel_to_ip))
@ray.remote
class InputS3MultiParquetNode(InputNode):
def __init__(self, id, bucket, key, channel_to_ip, columns = None, batch_func=None, dependent_map={}):
super().__init__(id, channel_to_ip, dependent_map)
self.bucket = bucket
self.key = key
self.columns = columns
self.batch_func = batch_func
def initialize(self, my_id):
if self.bucket is None:
raise Exception
self.accessor = InputMultiParquetDataset(self.bucket, self.key, columns = self.columns)
self.accessor.set_num_mappers(len(self.channel_to_ip))
@ray.remote
class InputRedisDatasetNode(InputNode):
def __init__(self, id, channel_objects, channel_to_ip, batch_func=None, dependent_map={}):
super().__init__(id, channel_to_ip, dependent_map)
self.channel_objects = channel_objects
self.batch_func = batch_func
def initialize(self, my_id):
ip_set = set()
for channel in self.channel_objects:
for object in self.channel_objects[channel]:
ip_set.add(object[0])
self.accessor = RedisObjectsDataset(self.channel_objects, ip_set)
class TaskGraph:
# this keeps the logical dependency DAG between tasks
def __init__(self) -> None:
self.current_node = 0
self.nodes = {}
self.node_channel_to_ip = {}
self.node_ips = {}
self.datasets = {}
def flip_ip_channels(self, ip_to_num_channel):
ips = list(ip_to_num_channel.keys())
starts = np.cumsum([0] + [ip_to_num_channel[ip] for ip in ips])
start_dict = {ips[k]: starts[k] for k in range(len(ips))}
lists_to_merge = [ {i: ip for i in range(start_dict[ip], start_dict[ip] + ip_to_num_channel[ip])} for ip in ips ]
channel_to_ip = {k: v for d in lists_to_merge for k, v in d.items()}
for key in channel_to_ip:
if channel_to_ip[key] == 'localhost':
channel_to_ip[key] = ray.worker._global_node.address.split(":")[0]
return channel_to_ip
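    # Worked example (illustrative): flip_ip_channels({"172.31.0.1": 2, "172.31.0.2": 1})
    # yields {0: "172.31.0.1", 1: "172.31.0.1", 2: "172.31.0.2"}; channels are numbered
    # consecutively across IPs, and any 'localhost' entry is rewritten to the head node's address.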
def return_dependent_map(self, dependents):
dependent_map = {}
if len(dependents) > 0:
for node in dependents:
dependent_map[node] = (self.node_ips[node], len(self.node_channel_to_ip[node]))
return dependent_map
def new_input_redis(self, dataset, ip_to_num_channel, policy = "default", batch_func=None, dependents = []):
dependent_map = self.return_dependent_map(dependents)
channel_to_ip = self.flip_ip_channels(ip_to_num_channel)
# this will assert that the dataset is complete. You can only call this API on a completed dataset
objects = ray.get(dataset.get_objects.remote())
ip_to_channel_sets = {}
for channel in channel_to_ip:
ip = channel_to_ip[channel]
if ip not in ip_to_channel_sets:
ip_to_channel_sets[ip] = {channel}
else:
ip_to_channel_sets[ip].add(channel)
# current heuristics for scheduling objects to reader channels:
# if an object can be streamed out locally from someone, always do that
# try to balance the amounts of things that people have to stream out locally
# if an object cannot be streamed out locally, assign it to anyone
# try to balance the amounts of things that people have to fetch over the network.
channel_objects = {channel: [] for channel in channel_to_ip}
if policy == "default":
local_read_sizes = {channel: 0 for channel in channel_to_ip}
remote_read_sizes = {channel: 0 for channel in channel_to_ip}
for writer_channel in objects:
for object in objects[writer_channel]:
ip, key, size = object
# the object is on a machine that is not part of this task node, will have to remote fetch
if ip not in ip_to_channel_sets:
# find the channel with the least amount of remote read
my_channel = min(remote_read_sizes, key = remote_read_sizes.get)
channel_objects[my_channel].append(object)
remote_read_sizes[my_channel] += size
else:
eligible_sizes = {reader_channel : local_read_sizes[reader_channel] for reader_channel in ip_to_channel_sets[ip]}
my_channel = min(eligible_sizes, key = eligible_sizes.get)
channel_objects[my_channel].append(object)
local_read_sizes[my_channel] += size
else:
raise Exception("other distribution policies not implemented yet.")
print(channel_objects)
tasknode = []
for ip in ip_to_num_channel:
if ip != 'localhost':
tasknode.extend([InputRedisDatasetNode.options(num_cpus=0.001, resources={"node:" + ip : 0.001}).
remote(self.current_node, channel_objects, channel_to_ip, batch_func=batch_func, dependent_map=dependent_map) for i in range(ip_to_num_channel[ip])])
else:
tasknode.extend([InputRedisDatasetNode.options(num_cpus=0.001,resources={"node:" + ray.worker._global_node.address.split(":")[0] : 0.001}).
remote(self.current_node, channel_objects, channel_to_ip, batch_func=batch_func, dependent_map=dependent_map) for i in range(ip_to_num_channel[ip])])
self.nodes[self.current_node] = tasknode
self.node_channel_to_ip[self.current_node] = channel_to_ip
self.node_ips[self.current_node] = ip
self.current_node += 1
return self.current_node - 1
def new_input_csv(self, bucket, key, names, ip_to_num_channel, batch_func=None, sep = ",", dependents = [], stride= 64 * 1024 * 1024):
dependent_map = self.return_dependent_map(dependents)
channel_to_ip = self.flip_ip_channels(ip_to_num_channel)
tasknode = []
for ip in ip_to_num_channel:
if ip != 'localhost':
tasknode.extend([InputS3CSVNode.options(num_cpus=0.001, resources={"node:" + ip : 0.001}).
remote(self.current_node, bucket,key,names, channel_to_ip, batch_func = batch_func,sep = sep,
stride= stride, dependent_map = dependent_map, ) for i in range(ip_to_num_channel[ip])])
else:
tasknode.extend([InputS3CSVNode.options(num_cpus=0.001,resources={"node:" + ray.worker._global_node.address.split(":")[0] : 0.001}).
remote(self.current_node, bucket,key,names, channel_to_ip, batch_func = batch_func, sep = sep,
stride = stride, dependent_map = dependent_map, ) for i in range(ip_to_num_channel[ip])])
self.nodes[self.current_node] = tasknode
self.node_channel_to_ip[self.current_node] = channel_to_ip
self.node_ips[self.current_node] = ip
self.current_node += 1
return self.current_node - 1
def new_input_multiparquet(self, bucket, key, ip_to_num_channel, batch_func=None, columns = None, dependents = []):
dependent_map = self.return_dependent_map(dependents)
channel_to_ip = self.flip_ip_channels(ip_to_num_channel)
tasknode = []
for ip in ip_to_num_channel:
if ip != 'localhost':
tasknode.extend([InputS3MultiParquetNode.options(num_cpus=0.001, resources={"node:" + ip : 0.001}).
remote(self.current_node, bucket,key,channel_to_ip, columns = columns,
batch_func = batch_func,dependent_map = dependent_map) for i in range(ip_to_num_channel[ip])])
else:
tasknode.extend([InputS3MultiParquetNode.options(num_cpus=0.001,resources={"node:" + ray.worker._global_node.address.split(":")[0] : 0.001}).
remote(self.current_node, bucket,key,channel_to_ip, columns = columns,
batch_func = batch_func, dependent_map = dependent_map) for i in range(ip_to_num_channel[ip])])
self.nodes[self.current_node] = tasknode
self.node_channel_to_ip[self.current_node] = channel_to_ip
self.node_ips[self.current_node] = ip
self.current_node += 1
return self.current_node - 1
def new_non_blocking_node(self, streams, datasets, functionObject, ip_to_num_channel, partition_key):
channel_to_ip = self.flip_ip_channels(ip_to_num_channel)
# this is the mapping of physical node id to the key the user called in streams. i.e. if you made a node, task graph assigns it an internal id #
# then if you set this node as the input of this new non blocking task node and do streams = {0: node}, then mapping will be {0: the internal id of that node}
mapping = {}
# this is a dictionary of stream_id to the number of channels in that stream
source_parallelism = {}
for key in streams:
source = streams[key]
if source not in self.nodes:
raise Exception("stream source not registered")
ray.get([i.append_to_targets.remote((self.current_node, channel_to_ip, partition_key[key])) for i in self.nodes[source]])
mapping[source] = key
source_parallelism[source] = len(self.node_channel_to_ip[source]) # this only cares about how many channels the source has
tasknode = []
for ip in ip_to_num_channel:
if ip != 'localhost':
tasknode.extend([NonBlockingTaskNode.options(num_cpus = 0.001, resources={"node:" + ip : 0.001}).remote(streams, datasets, functionObject, self.current_node,
channel_to_ip, mapping, source_parallelism, ip) for i in range(ip_to_num_channel[ip])])
else:
tasknode.extend([NonBlockingTaskNode.options(num_cpus = 0.001, resources={"node:" + ray.worker._global_node.address.split(":")[0]: 0.001}).remote(streams,
datasets, functionObject, self.current_node, channel_to_ip, mapping, source_parallelism, ip) for i in range(ip_to_num_channel[ip])])
self.nodes[self.current_node] = tasknode
self.node_channel_to_ip[self.current_node] = channel_to_ip
self.node_ips[self.current_node] = ip
self.current_node += 1
return self.current_node - 1
def new_blocking_node(self, streams, datasets, functionObject, ip_to_num_channel, partition_key):
channel_to_ip = self.flip_ip_channels(ip_to_num_channel)
mapping = {}
source_parallelism = {}
for key in streams:
source = streams[key]
if source not in self.nodes:
raise Exception("stream source not registered")
ray.get([i.append_to_targets.remote((self.current_node, channel_to_ip, partition_key[key])) for i in self.nodes[source]])
mapping[source] = key
source_parallelism[source] = len(self.node_channel_to_ip[source]) # this only cares about how many channels the source has
# the datasets will all be managed on the head node. Note that they are not in charge of actually storing the objects, they just
# track the ids.
output_dataset = Dataset.options(num_cpus = 0.001, resources={"node:" + ray.worker._global_node.address.split(":")[0]: 0.001}).remote(len(channel_to_ip))
tasknode = []
for ip in ip_to_num_channel:
if ip != 'localhost':
tasknode.extend([BlockingTaskNode.options(num_cpus = 0.001, resources={"node:" + ip : 0.001}).remote(streams, datasets, output_dataset, functionObject, self.current_node,
channel_to_ip, mapping, source_parallelism, ip) for i in range(ip_to_num_channel[ip])])
else:
tasknode.extend([BlockingTaskNode.options(num_cpus = 0.001, resources={"node:" + ray.worker._global_node.address.split(":")[0]: 0.001}).remote(streams,
datasets, output_dataset, functionObject, self.current_node, channel_to_ip, mapping, source_parallelism, ip) for i in range(ip_to_num_channel[ip])])
self.nodes[self.current_node] = tasknode
self.node_channel_to_ip[self.current_node] = channel_to_ip
self.node_ips[self.current_node] = ip
self.current_node += 1
return output_dataset
def initialize(self):
processes = []
for key in self.nodes:
node = self.nodes[key]
for i in range(len(node)):
replica = node[i]
processes.append(replica.initialize.remote(i))
ray.get(processes)
def run(self):
processes = []
for key in self.nodes:
node = self.nodes[key]
for i in range(len(node)):
replica = node[i]
processes.append(replica.execute.remote(i))
ray.get(processes)
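# Minimal wiring sketch (illustrative only): `JoinExecutor`, the column names, the
# bucket/key names and the channel counts below are placeholders for executor objects
# and inputs defined elsewhere, not part of this module.
#   task_graph = TaskGraph()
#   lineitem = task_graph.new_input_csv("my-bucket", "lineitem.tbl", lineitem_names,
#                                       {"localhost": 4}, sep="|")
#   orders = task_graph.new_input_csv("my-bucket", "orders.tbl", order_names,
#                                     {"localhost": 4}, sep="|")
#   joined = task_graph.new_blocking_node({0: lineitem, 1: orders}, None, JoinExecutor(),
#                                         {"localhost": 4}, {0: "l_orderkey", 1: "o_orderkey"})
#   task_graph.initialize()
#   task_graph.run()
#   df = ray.get(joined.to_pandas.remote())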
|
the-stack_106_30078 | # coding=utf-8
#
# Yu Wang (University of Yamanashi)
# May, 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import copy
from glob import glob
from collections import namedtuple
'''Some Exception Classes'''
class WrongPath(Exception):pass
class WrongOperation(Exception):pass
class WrongDataFormat(Exception):pass
class ShellProcessError(Exception):pass
class KaldiProcessError(Exception):pass
class KenlmProcessError(Exception):pass
class UnsupportedType(Exception):pass
class UnsupportedKaldiVersion(Exception): pass
'''Version Control'''
_MAJOR_VERSION = '1'
_MINOR_VERSION = '3'
_PATCH_VERSION = '2'
_EXPECTED_KALDI_VERSION = "5.5"
_TIMEOUT = 500
class ExKaldiInfo( namedtuple("ExKaldiInfo",["version","major","minor","patch"]) ):
'''
Generate a object that carries various Exkaldi configurations.
'''
def initialize(self):
'''
Initialize.
'''
self.__KALDI_ROOT = None
self.__ENV = None
self.__LOG_DIR = None
# Update the root path of Kaldi
_ = self.KALDI_ROOT
        # If Kaldi exists, check its version
if not self.__KALDI_ROOT is None:
_ = self.KALDI
return self
@property
def EXKALDI(self):
'''
Get the Exkaldi version information.
Return:
A named tuple.
'''
return self
@property
def KALDI(self):
'''
Get Kaldi version number. It will consult the ".version" file in Kaldi root path.
Return:
if Kaldi has not been found:
return None.
elif ".version" has not been found:
return "unknown".
else:
return a named tuple of version number.
'''
if self.__KALDI_ROOT is None:
print("Warning: Kaldi toolkit was not found.")
return None
else:
filePath = os.path.join(self.__KALDI_ROOT, "src", ".version")
if not os.path.isfile(filePath):
print("Warning: Version information file was not found in Kaldi root directory.")
return "unknown"
else:
with open(filePath, "r", encoding="utf-8") as fr:
v = fr.readline().strip()
major, minor = v.split(".")[0:2]
if v != _EXPECTED_KALDI_VERSION:
raise UnsupportedKaldiVersion(f"Current Exkaldi only supports Kaldi version=={_EXPECTED_KALDI_VERSION} but got {v}.")
else:
return namedtuple("Kaldi", ["version", "major", "minor"])(v, major, minor)
@property
def KALDI_ROOT(self):
'''
        Get the Kaldi root path.
        Kaldi is allowed to be missing for now; it can be appointed later.
Return:
None if Kaldi has not been found in system PATH, or a string.
'''
if self.__KALDI_ROOT is None:
cmd = "which copy-feats"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
out, err = p.communicate()
if out == b'':
print("Warning: Kaldi root directory was not found in system PATH. You can appoint it:")
print("exkaldi.info.reset_kaldi_root( yourPath )")
print("If not, ERROR will occur when implementing some core functions.")
else:
self.__KALDI_ROOT = out.decode().strip()[0:-23]
self.reset_kaldi_root(self.__KALDI_ROOT)
return self.__KALDI_ROOT
@property
def ENV(self):
'''
Get the system environment in which ExKaldi are running.
Return:
a dict object.
'''
if self.__ENV is None:
self.__ENV = os.environ.copy()
# ENV is a dict object, so deepcopy it.
return copy.deepcopy(self.__ENV)
def reset_kaldi_root(self, path):
'''
Reset the root path of Kaldi toolkit and add related directories to system PATH manually.
Args:
<path>: a directory path.
'''
assert isinstance(path, str), "<path> should be a directory name-like string."
path = path.strip()
if not os.path.isdir(path):
raise WrongPath(f"No such directory: {path}.")
else:
path = os.path.abspath(path)
# verify this path roughly
cmd = os.path.join(f"ls {path}", "src", "featbin", "copy-feats")
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
out, err = p.communicate()
if out == b'':
raise WrongPath(f"{path} is not kaldi path avaliable.")
else:
self.__KALDI_ROOT = path
oldENV = self.ENV['PATH'] #deepcopied dict object
systemPATH = []
# Abandon old kaldi path of environment
for i in oldENV.split(':'):
if i.endswith( os.path.join("", "tools", "openfst") ):
continue
elif i.endswith( os.path.join("", "tools", "openfst", "bin") ):
continue
elif i.endswith( os.path.join("", "src", "featbin") ):
continue
elif i.endswith( os.path.join("", "src", "GAMbian") ):
continue
elif i.endswith( os.path.join("", "src", "nnetbin") ):
continue
elif i.endswith( os.path.join("", "src", "bin") ):
continue
elif i.endswith( os.path.join("", "src", "lmbin") ):
continue
elif i.endswith( os.path.join("", "src", "fstbin") ):
continue
elif i.endswith( os.path.join("", "src", "latbin") ):
continue
elif i.endswith( os.path.join("", "src", "gmmbin") ):
continue
else:
systemPATH.append(i)
# collect new paths
systemPATH.append( os.path.join(path, "src", "bin") )
systemPATH.append( os.path.join(path, "tools", "openfst") )
systemPATH.append( os.path.join(path, "tools", "openfst", "bin") )
systemPATH.append( os.path.join(path, "src", "featbin") )
systemPATH.append( os.path.join(path, "src", "GAMbian") )
systemPATH.append( os.path.join(path, "src", "nnetbin") )
systemPATH.append( os.path.join(path, "src", "lmbin") )
systemPATH.append( os.path.join(path, "src", "fstbin") )
systemPATH.append( os.path.join(path, "src", "latbin") )
systemPATH.append( os.path.join(path, "src", "gmmbin") )
# reset the environment
systemPATH = ":".join(systemPATH)
self.__ENV['PATH'] = systemPATH
def export_path(self, path):
'''
Add a path to Exkaldi environment PATH.
Args:
<path>: a path.
'''
if not os.path.exists(path):
raise WrongPath(f"No such path: {path}.")
systemPATH = self.ENV["PATH"]
systemPATH += f":{path}"
self.__ENV['PATH'] = systemPATH
def prepare_srilm(self):
'''
Prepare SriLM toolkit and add it to Exkaldi system PATH.
'''
if self.KALDI_ROOT is None:
raise WrongPath("Kaldi toolkit was not found.")
else:
SRILMROOT = os.path.join(self.KALDI_ROOT, "tools", "srilm")
if not os.path.isdir(SRILMROOT):
raise WrongPath("SRILM language model tool was not found. Please install it with KALDI_ROOT/tools/.install_srilm.sh .")
systemPATH = []
oldENV = self.ENV['PATH']
# Abandon old srilm path of environment
for i in oldENV.split(':'):
if i.endswith('srilm'):
continue
elif i.endswith( os.path.join('srilm','bin') ):
continue
elif i.endswith( os.path.join('srilm','bin','i686-m64') ):
continue
else:
systemPATH.append(i)
# Add new srilm path to environment
systemPATH.append( SRILMROOT )
systemPATH.append( os.path.join(SRILMROOT,'bin') )
systemPATH.append( os.path.join(SRILMROOT,'bin','i686-m64') )
systemPATH = ":".join(systemPATH)
self.__ENV['PATH'] = systemPATH
@property
def timeout(self):
return _TIMEOUT
def set_timeout(self,timeout):
'''
Reset the global timeout value.
Args:
<timeout>: a positive int value.
'''
assert isinstance(timeout, int) and timeout > 0, f"<timeout> must be a positive int value but got: {timeout}."
global _TIMEOUT
_TIMEOUT = timeout
# initialize version information
info = ExKaldiInfo(
'.'.join([_MAJOR_VERSION,_MINOR_VERSION,_PATCH_VERSION]),
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
).initialize()
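# A minimal usage sketch (assuming this module is exposed as `exkaldi.info`; the
# paths below are placeholders):
#
#   import exkaldi
#   exkaldi.info.reset_kaldi_root("/opt/kaldi")      # point Exkaldi at a Kaldi build
#   exkaldi.info.export_path("/opt/kaldi/tools/srilm/bin")
#   exkaldi.info.set_timeout(1000)
#   print(exkaldi.info.KALDI)                        # named tuple with the Kaldi version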
# clear the temporary files possibly being left.
garbageFiles = glob(os.path.join(" ","tmp","exkaldi_*").lstrip())
for t in garbageFiles:
os.remove(t) |
the-stack_106_30080 | from rlbot.agents.base_agent import BaseAgent
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.orientation import Orientation
from util.vec import Vec3
import util.const
from state.recover import Recover
import math
class Chase(Recover):
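    """Drive towards the predicted landing spot of the ball.

    The prediction is only chased by the teammate that can realistically reach it in
    time; other cars shadow the play by aiming at a point behind the ball on their
    own side of the field.
    """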
def __init__(self, agent: BaseAgent):
super().__init__(agent)
self.attachedTicks = 0
def tick(self, packet: GameTickPacket) -> bool:
if super().tick(packet):
return True
myCar = packet.game_cars[self.agent.index]
if self.agent.spikeWatcher.carrying_car == myCar:
self.attachedTicks += 1
if self.attachedTicks > 14:
return False
else:
self.attachedTicks = 0
targetSide = 1 - 2*myCar.team
carDirection = -myCar.physics.rotation.yaw
carLocation = Vec3(myCar.physics.location)
carVelocity = Vec3(myCar.physics.velocity)
carSpeed = carVelocity.length()
ballLocation = Vec3(packet.game_ball.physics.location)
ballVelocity = Vec3(packet.game_ball.physics.velocity)
ballFutureTime = 1 / 60
ball_prediction = self.agent.get_ball_prediction_struct()
closestTeamCarDistance = 9999999
bestCar = None
for j in range(0, packet.num_cars):
car = packet.game_cars[j]
distance = (ballLocation - Vec3(car.physics.location)).length()
if car.team == myCar.team and distance < closestTeamCarDistance and not car.is_demolished:
closestTeamCarDistance = distance
                bestCar = car  # initialise it in case ball prediction isn't productive
if ball_prediction is not None:
for i in range(0, ball_prediction.num_slices):
prediction_slice = ball_prediction.slices[i]
if prediction_slice.physics.location.z - max(prediction_slice.physics.velocity.z / 60, 0) < 100:
possibleBallFutureTime = (i + 1) / 60
possibleBallLocation = Vec3(prediction_slice.physics.location)
for j in range(0, packet.num_cars):
car = packet.game_cars[j]
if car.team == myCar.team and not car.is_demolished:
if ((ballLocation - Vec3(car.physics.location)).flat().length() - 200) / possibleBallFutureTime <= 2300:
ballFutureTime = possibleBallFutureTime
ballLocation = possibleBallLocation
ballVelocity = Vec3(prediction_slice.physics.velocity)
bestCar = car
break
else:
continue
break
shadowing = bestCar != myCar and (self.agent.spikeWatcher.carrying_car is None or self.agent.spikeWatcher.carrying_car.team == myCar.team)
if shadowing:
ballLocation.y -= 1250 * targetSide
ballToCarAbsoluteLocation = (ballLocation - carLocation).flat()
ballToCarLocation = ballToCarAbsoluteLocation.rotate_2D(carDirection)
ballToCarVelocity = (ballVelocity - carVelocity).flat().rotate_2D(carDirection)
angle = ballToCarLocation.atan2()
driveDistance = (ballLocation - carLocation).flat().length()
if self.agent.spikeWatcher.carrying_car is None or self.agent.spikeWatcher.carrying_car.team == myCar.team:
targetSpeed = max(driveDistance, 0) / ballFutureTime
targetThrottle = (targetSpeed - Vec3(myCar.physics.velocity).length()) / 300
targetThrottle = max(min((ballFutureTime - .25) * .8, (abs(ballToCarLocation.x) - 700) / 1500), targetThrottle)
targetThrottle = min(1, targetThrottle)
else:
targetThrottle = 1
steer = min(2, max(-2, 4 * angle))
# if ballToCarLocation.length() < 1000:
# steer += 0.005 * ballToCarVelocity.y
self.controller.steer = min(1, max(-1, steer))
frontOrBehind = 1 - math.fabs(angle) / (math.pi if ballToCarLocation.flat().length() > 500 else math.pi / 2) # allow backwards if close
turnThrottle = min(1, max(-1, math.copysign(.2, frontOrBehind) + frontOrBehind)) if driveDistance < 750 else 1
self.controller.throttle = targetThrottle * turnThrottle
minimumSpeedRequired = 2300 - 991.667/120 * (1 if self.controller.boost else 10)
wantToBoost = frontOrBehind > .9 and self.controller.throttle > .9 and ballToCarLocation.x > 700
self.controller.boost = (carSpeed < minimumSpeedRequired) and myCar.boost > 0 and wantToBoost
return True
|
the-stack_106_30081 | #!/usr/bin/env python
# Copyright (c) 2014 The Beginnercoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
# Add python-beginnercoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-beginnercoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from beginnercoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def run_test(nodes):
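    """Exercise listtransactions: simple sends, confirmations, send-to-self and sendmany."""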
# Simple send, 0 to 1:
txid = nodes[0].sendtoaddress(nodes[1].getnewaddress(), 0.1)
sync_mempools(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
nodes[0].setgenerate(True, 1)
sync_blocks(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = nodes[0].sendtoaddress(nodes[0].getnewaddress(), 0.2)
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { nodes[0].getnewaddress() : 0.11, nodes[1].getnewaddress() : 0.22,
nodes[0].getaccountaddress("from1") : 0.33, nodes[1].getaccountaddress("toself") : 0.44 }
txid = nodes[1].sendmany("", send_to)
sync_mempools(nodes)
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave beginnercoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing beginnercoind/beginnercoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_beginnercoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
|
the-stack_106_30083 | from experiment_utils import set_env
set_env()
from cgn_framework.imagenet import train_cgn, config
import argparse
def disable_loss_from_config(cfg):
"""Disable the losses as specified in by the configuration of the experiment."""
if 'shape' in cfg.disable_loss:
cfg.LAMBDA.BINARY = 0
cfg.LAMBDA.MASK = 0
if 'text' in cfg.disable_loss:
cfg.LAMBDA.TEXT = [0, 0, 0, 0]
if 'background' in cfg.disable_loss:
cfg.LAMBDA.BG = 0
if 'reconstruction' in cfg.disable_loss:
cfg.LAMBDA.L1 = 0
cfg.LAMBDA.PERC = [0, 0, 0, 0]
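# For example, an ablation without the shape and text losses zeroes LAMBDA.BINARY,
# LAMBDA.MASK and LAMBDA.TEXT while leaving the remaining weights untouched:
#
#   cfg = config.get_cfg_defaults()
#   cfg.disable_loss = ['shape', 'text']
#   disable_loss_from_config(cfg)
#
# From the command line this corresponds to something like (script name assumed):
#   python ablation_train.py --model_name no_shape_text --disable_loss shape text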
def main(args):
cfg = config.get_cfg_defaults()
cfg = train_cgn.merge_args_and_cfg(args, cfg)
cfg.disable_loss = args.disable_loss
print(cfg)
disable_loss_from_config(cfg)
train_cgn.main(cfg)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Add arguments from original training script
parser.add_argument('--model_name', default='tmp',
help='Weights and samples will be saved under experiments/model_name')
parser.add_argument('--weights_path', default='',
help='provide path to continue training')
parser.add_argument('--sampled_fixed_noise', default=False, action='store_true',
help='If you want a different noise vector than provided in the repo')
parser.add_argument('--save_singles', default=False, action='store_true',
help='Save single images instead of sheets')
parser.add_argument('--truncation', type=float, default=1.0,
help='Truncation value for noise sampling')
parser.add_argument('--episodes', type=int, default=50,
help="We don't do dataloading, hence, one episode = one gradient update.")
parser.add_argument('--batch_sz', type=int, default=1,
                        help='Batch size, use in conjunction with batch_acc')
parser.add_argument('--batch_acc', type=int, default=2048,
help='pseudo_batch_size = batch_acc*batch size')
parser.add_argument('--save_iter', type=int, default=2048,
help='Save samples/weights every n iter')
parser.add_argument('--log_losses', default=False, action='store_true',
help='Print out losses')
# Add new argument for that disables specific losses
parser.add_argument('--disable_loss', type=str, nargs='*', default=[],
choices=['shape', 'text', 'background', 'reconstruction'],
help='Choose 0 or more losses whose weight will become 0')
args = parser.parse_args()
main(args) |
the-stack_106_30087 | # -*- coding: utf-8 -*-
"""
Map
------
Classes for drawing maps.
"""
from __future__ import unicode_literals
import json
from collections import OrderedDict
from jinja2 import Environment, PackageLoader, Template
from branca.six import text_type, binary_type
from branca.utilities import _parse_size
from branca.element import (Element, Figure, MacroElement, Html,
JavascriptLink, CssLink)
ENV = Environment(loader=PackageLoader('folium', 'templates'))
_default_js = [
('leaflet',
'https://unpkg.com/[email protected]/dist/leaflet.js'),
('jquery',
'https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js'),
('bootstrap',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js'),
('awesome_markers',
'https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.js'), # noqa
('marker_cluster_src',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/leaflet.markercluster-src.js'), # noqa
('marker_cluster',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/leaflet.markercluster.js'), # noqa
]
_default_css = [
('leaflet_css',
'https://unpkg.com/[email protected]/dist/leaflet.css'),
('bootstrap_css',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css'),
('bootstrap_theme_css',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css'), # noqa
('awesome_markers_font_css',
'https://maxcdn.bootstrapcdn.com/font-awesome/4.6.3/css/font-awesome.min.css'), # noqa
('awesome_markers_css',
'https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.css'), # noqa
('marker_cluster_default_css',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/MarkerCluster.Default.css'), # noqa
('marker_cluster_css',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/MarkerCluster.css'), # noqa
('awesome_rotate_css',
'https://rawgit.com/python-visualization/folium/master/folium/templates/leaflet.awesome.rotate.css'), # noqa
]
class LegacyMap(MacroElement):
"""Create a Map with Folium and Leaflet.js
Generate a base map of given width and height with either default
tilesets or a custom tileset URL. The following tilesets are built-in
to Folium. Pass any of the following to the "tiles" keyword:
- "OpenStreetMap"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Map (Northing, Easting).
width: pixel int or percentage string (default: '100%')
Width of the map.
height: pixel int or percentage string (default: '100%')
Height of the map.
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can choose from a list of built-in tiles,
pass a custom URL or pass `None` to create a map without tiles.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
max_zoom: int, default 18
Maximum zoom depth for the map.
zoom_start: int, default 10
Initial zoom level for the map.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
detect_retina: bool, default False
If true and user is on a retina display, it will request four
tiles of half the specified size and a bigger zoom level in place
of one to utilize the high resolution.
crs : str, default 'EPSG3857'
Defines coordinate reference systems for projecting geographical points
into pixel (screen) coordinates and back.
You can use Leaflet's values :
* EPSG3857 : The most common CRS for online maps, used by almost all
free and commercial tile providers. Uses Spherical Mercator projection.
          Set by default in Map's crs option.
* EPSG4326 : A common CRS among GIS enthusiasts.
Uses simple Equirectangular projection.
* EPSG3395 : Rarely used by some commercial tile providers.
Uses Elliptical Mercator projection.
* Simple : A simple CRS that maps longitude and latitude into
x and y directly. May be used for maps of flat surfaces
(e.g. game maps). Note that the y axis should still be inverted
(going from bottom to top).
control_scale : bool, default False
Whether to add a control scale on the map.
prefer_canvas : bool, default False
Forces Leaflet to use the Canvas back-end (if available) for
vector layers instead of SVG. This can increase performance
considerably in some cases (e.g. many thousands of circle
markers on the map).
no_touch : bool, default False
Forces Leaflet to not use touch events even if it detects them.
disable_3d : bool, default False
Forces Leaflet to not use hardware-accelerated CSS 3D
transforms for positioning (which may cause glitches in some
rare environments) even if they're supported.
Returns
-------
Folium LegacyMap Object
Examples
--------
>>> map = folium.LegacyMap(location=[45.523, -122.675],
... width=750, height=500)
>>> map = folium.LegacyMap(location=[45.523, -122.675],
... tiles='Mapbox Control Room')
>>> map = folium.LegacyMap(location=(45.523, -122.675), max_zoom=20,
... tiles='Cloudmade', API_key='YourKey')
>>> map = folium.LegacyMap(location=[45.523, -122.675], zoom_start=2,
... tiles=('http://{s}.tiles.mapbox.com/v3/'
... 'mapbox.control-room/{z}/{x}/{y}.png'),
... attr='Mapbox attribution')
"""
def __init__(self, location=None, width='100%', height='100%',
left="0%", top="0%", position='relative',
tiles='OpenStreetMap', API_key=None, max_zoom=18, min_zoom=1,
zoom_start=10, continuous_world=False, world_copy_jump=False,
no_wrap=False, attr=None, min_lat=-90, max_lat=90,
min_lon=-180, max_lon=180, max_bounds=True,
detect_retina=False, crs='EPSG3857', control_scale=False,
prefer_canvas=False, no_touch=False, disable_3d=False):
super(LegacyMap, self).__init__()
self._name = 'Map'
self._env = ENV
if not location:
# If location is not passed we center and ignore zoom.
self.location = [0, 0]
self.zoom_start = min_zoom
else:
self.location = location
self.zoom_start = zoom_start
Figure().add_child(self)
# Map Size Parameters.
self.width = _parse_size(width)
self.height = _parse_size(height)
self.left = _parse_size(left)
self.top = _parse_size(top)
self.position = position
self.min_lat = min_lat
self.max_lat = max_lat
self.min_lon = min_lon
self.max_lon = max_lon
self.max_bounds = max_bounds
self.continuous_world = continuous_world
self.no_wrap = no_wrap
self.world_copy_jump = world_copy_jump
self.crs = crs
self.control_scale = control_scale
self.global_switches = GlobalSwitches(prefer_canvas, no_touch, disable_3d)
if tiles:
self.add_tile_layer(
tiles=tiles, min_zoom=min_zoom, max_zoom=max_zoom,
continuous_world=continuous_world, no_wrap=no_wrap, attr=attr,
API_key=API_key, detect_retina=detect_retina
)
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
position : {{this.position}};
width : {{this.width[0]}}{{this.width[1]}};
height: {{this.height[0]}}{{this.height[1]}};
left: {{this.left[0]}}{{this.left[1]}};
top: {{this.top[0]}}{{this.top[1]}};
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div class="folium-map" id="{{this.get_name()}}" ></div>
{% endmacro %}
{% macro script(this, kwargs) %}
{% if this.max_bounds %}
var southWest = L.latLng({{ this.min_lat }}, {{ this.min_lon }});
var northEast = L.latLng({{ this.max_lat }}, {{ this.max_lon }});
var bounds = L.latLngBounds(southWest, northEast);
{% else %}
var bounds = null;
{% endif %}
var {{this.get_name()}} = L.map(
'{{this.get_name()}}',
{center: [{{this.location[0]}},{{this.location[1]}}],
zoom: {{this.zoom_start}},
maxBounds: bounds,
layers: [],
worldCopyJump: {{this.world_copy_jump.__str__().lower()}},
crs: L.CRS.{{this.crs}}
});
{% if this.control_scale %}L.control.scale().addTo({{this.get_name()}});{% endif %}
{% endmacro %}
""") # noqa
def _repr_html_(self, **kwargs):
"""Displays the Map in a Jupyter notebook.
"""
if self._parent is None:
self.add_to(Figure())
out = self._parent._repr_html_(**kwargs)
self._parent = None
else:
out = self._parent._repr_html_(**kwargs)
return out
def add_tile_layer(self, tiles='OpenStreetMap', name=None,
API_key=None, max_zoom=18, min_zoom=1,
continuous_world=False, attr=None, active=False,
detect_retina=False, no_wrap=False, **kwargs):
"""
Add a tile layer to the map. See TileLayer for options.
"""
tile_layer = TileLayer(tiles=tiles, name=name,
min_zoom=min_zoom, max_zoom=max_zoom,
attr=attr, API_key=API_key,
detect_retina=detect_retina,
continuous_world=continuous_world,
no_wrap=no_wrap)
self.add_child(tile_layer, name=tile_layer.tile_name)
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
# Set global switches
figure.header.add_child(self.global_switches, name='global_switches')
# Import Javascripts
for name, url in _default_js:
figure.header.add_child(JavascriptLink(url), name=name)
# Import Css
for name, url in _default_css:
figure.header.add_child(CssLink(url), name=name)
figure.header.add_child(Element(
'<style>html, body {'
'width: 100%;'
'height: 100%;'
'margin: 0;'
'padding: 0;'
'}'
'</style>'), name='css_style')
figure.header.add_child(Element(
'<style>#map {'
'position:absolute;'
'top:0;'
'bottom:0;'
'right:0;'
'left:0;'
'}'
'</style>'), name='map_style')
super(LegacyMap, self).render(**kwargs)
class GlobalSwitches(Element):
def __init__(self, prefer_canvas=False, no_touch=False, disable_3d=False):
super(GlobalSwitches, self).__init__()
self._name = 'GlobalSwitches'
self.prefer_canvas = prefer_canvas
self.no_touch = no_touch
self.disable_3d = disable_3d
self._template = Template(
'<script>'
'L_PREFER_CANVAS = {% if this.prefer_canvas %}true{% else %}false{% endif %}; '
'L_NO_TOUCH = {% if this.no_touch %}true{% else %}false{% endif %}; '
'L_DISABLE_3D = {% if this.disable_3d %}true{% else %}false{% endif %};'
'</script>'
)
class Layer(MacroElement):
"""An abstract class for everything that is a Layer on the map.
It will be used to define whether an object will be included in
LayerControls.
Parameters
----------
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
"""
def __init__(self, name=None, overlay=False, control=True):
super(Layer, self).__init__()
self.layer_name = name if name is not None else self.get_name()
self.overlay = overlay
self.control = control
class TileLayer(Layer):
"""Create a tile layer to append on a Map.
Parameters
----------
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can choose from this list of built-in tiles:
- "OpenStreetMap"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
min_zoom: int, default 1
Minimal zoom for which the layer will be displayed.
max_zoom: int, default 18
Maximal zoom for which the layer will be displayed.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
detect_retina: bool, default False
If true and user is on a retina display, it will request four
tiles of half the specified size and a bigger zoom level in place
of one to utilize the high resolution.
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
"""
def __init__(self, tiles='OpenStreetMap', min_zoom=1, max_zoom=18,
attr=None, API_key=None, detect_retina=False,
continuous_world=False, name=None, overlay=False,
control=True, no_wrap=False):
self.tile_name = (name if name is not None else
''.join(tiles.lower().strip().split()))
super(TileLayer, self).__init__(name=self.tile_name, overlay=overlay,
control=control)
self._name = 'TileLayer'
self._env = ENV
self.min_zoom = min_zoom
self.max_zoom = max_zoom
self.no_wrap = no_wrap
self.continuous_world = continuous_world
self.detect_retina = detect_retina
self.tiles = ''.join(tiles.lower().strip().split())
if self.tiles in ('cloudmade', 'mapbox') and not API_key:
raise ValueError('You must pass an API key if using Cloudmade'
' or non-default Mapbox tiles.')
templates = list(self._env.list_templates(
filter_func=lambda x: x.startswith('tiles/')))
tile_template = 'tiles/'+self.tiles+'/tiles.txt'
attr_template = 'tiles/'+self.tiles+'/attr.txt'
if tile_template in templates and attr_template in templates:
self.tiles = self._env.get_template(tile_template).render(API_key=API_key) # noqa
self.attr = self._env.get_template(attr_template).render()
else:
self.tiles = tiles
if not attr:
raise ValueError('Custom tiles must'
' also be passed an attribution.')
if isinstance(attr, binary_type):
attr = text_type(attr, 'utf8')
self.attr = attr
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{
maxZoom: {{this.max_zoom}},
minZoom: {{this.min_zoom}},
continuousWorld: {{this.continuous_world.__str__().lower()}},
noWrap: {{this.no_wrap.__str__().lower()}},
attribution: '{{this.attr}}',
detectRetina: {{this.detect_retina.__str__().lower()}}
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""") # noqa
class FeatureGroup(Layer):
"""
Create a FeatureGroup layer ; you can put things in it and handle them
as a single layer. For example, you can add a LayerControl to
tick/untick the whole group.
Parameters
----------
name : str, default None
The name of the featureGroup layer.
It will be displayed in the LayerControl.
If None get_name() will be called to get the technical (ugly) name.
overlay : bool, default True
Whether your layer will be an overlay (ticked with a check box in
LayerControls) or a base layer (ticked with a radio button).
"""
def __init__(self, name=None, overlay=True, control=True):
super(FeatureGroup, self).__init__(overlay=overlay, control=control, name=name) # noqa
self._name = 'FeatureGroup'
self.tile_name = name if name is not None else self.get_name()
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.featureGroup(
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class LayerControl(MacroElement):
"""
Creates a LayerControl object to be added on a folium map.
Parameters
----------
position : str
The position of the control (one of the map corners), can be
'topleft', 'topright', 'bottomleft' or 'bottomright'
default: 'topright'
collapsed : boolean
If true the control will be collapsed into an icon and expanded on
mouse hover or touch.
default: True
autoZIndex : boolean
If true the control assigns zIndexes in increasing order to all of
its layers so that the order is preserved when switching them on/off.
default: True
"""
def __init__(self, position='topright', collapsed=True, autoZIndex=True):
super(LayerControl, self).__init__()
self._name = 'LayerControl'
self.position = position
self.collapsed = str(collapsed).lower()
self.autoZIndex = str(autoZIndex).lower()
self.base_layers = OrderedDict()
self.overlays = OrderedDict()
self._template = Template("""
{% macro script(this,kwargs) %}
var {{this.get_name()}} = {
base_layers : { {% for key,val in this.base_layers.items() %}"{{key}}" : {{val}},{% endfor %} },
overlays : { {% for key,val in this.overlays.items() %}"{{key}}" : {{val}},{% endfor %} }
};
L.control.layers(
{{this.get_name()}}.base_layers,
{{this.get_name()}}.overlays,
{position: '{{this.position}}',
collapsed: {{this.collapsed}},
autoZIndex: {{this.autoZIndex}}
}).addTo({{this._parent.get_name()}});
{% endmacro %}
""") # noqa
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
# We select all Layers for which (control and not overlay).
self.base_layers = OrderedDict(
[(val.layer_name, val.get_name()) for key, val in
self._parent._children.items() if isinstance(val, Layer) and
(not hasattr(val, 'overlay') or not val.overlay) and
(not hasattr(val, 'control') or val.control)])
# We select all Layers for which (control and overlay).
self.overlays = OrderedDict(
[(val.layer_name, val.get_name()) for key, val in
self._parent._children.items() if isinstance(val, Layer) and
(hasattr(val, 'overlay') and val.overlay) and
(not hasattr(val, 'control') or val.control)])
super(LayerControl, self).render()
class Icon(MacroElement):
"""
Creates an Icon object that will be rendered
using Leaflet.awesome-markers.
Parameters
----------
color : str, default 'blue'
The color of the marker. You can use:
['red', 'blue', 'green', 'purple', 'orange', 'darkred',
'lightred', 'beige', 'darkblue', 'darkgreen', 'cadetblue',
'darkpurple', 'white', 'pink', 'lightblue', 'lightgreen',
'gray', 'black', 'lightgray']
icon_color : str, default 'white'
The color of the drawing on the marker. You can use colors above,
or an html color code.
icon : str, default 'info-sign'
The name of the marker sign.
See Font-Awesome website to choose yours.
Warning : depending on the icon you choose you may need to adapt
the `prefix` as well.
angle : int, default 0
The icon will be rotated by this amount of degrees.
prefix : str, default 'glyphicon'
The prefix states the source of the icon. 'fa' for font-awesome or
'glyphicon' for bootstrap 3.
For more details see:
https://github.com/lvoogdt/Leaflet.awesome-markers
"""
def __init__(self, color='blue', icon_color='white', icon='info-sign',
angle=0, prefix='glyphicon'):
super(Icon, self).__init__()
self._name = 'Icon'
self.color = color
self.icon = icon
self.icon_color = icon_color
self.angle = angle
self.prefix = prefix
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.AwesomeMarkers.icon({
icon: '{{this.icon}}',
iconColor: '{{this.icon_color}}',
markerColor: '{{this.color}}',
prefix: '{{this.prefix}}',
extraClasses: 'fa-rotate-{{this.angle}}'
});
{{this._parent.get_name()}}.setIcon({{this.get_name()}});
{% endmacro %}
""")
class Marker(MacroElement):
"""Create a simple stock Leaflet marker on the map, with optional
popup text or Vincent visualization.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
popup: string or folium.Popup, default None
Input text or visualization for object.
icon: Icon plugin
the Icon plugin to use to render the marker.
Returns
-------
Marker names and HTML in obj.template_vars
Examples
--------
>>> Marker(location=[45.5, -122.3], popup='Portland, OR')
>>> Marker(location=[45.5, -122.3], popup=folium.Popup('Portland, OR'))
"""
def __init__(self, location, popup=None, icon=None):
super(Marker, self).__init__()
self._name = 'Marker'
self.location = location
if icon is not None:
self.add_child(icon)
if isinstance(popup, text_type) or isinstance(popup, binary_type):
self.add_child(Popup(popup))
elif popup is not None:
self.add_child(popup)
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.marker(
[{{this.location[0]}},{{this.location[1]}}],
{
icon: new L.Icon.Default()
}
)
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
def _get_self_bounds(self):
"""Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]
"""
return [[self.location[0], self.location[1]],
[self.location[0], self.location[1]]]
class Popup(Element):
"""Create a Popup instance that can be linked to a Layer.
Parameters
----------
html: string or Element
Content of the Popup.
max_width: int, default 300
The maximal width of the popup.
"""
def __init__(self, html=None, max_width=300):
super(Popup, self).__init__()
self._name = 'Popup'
self.header = Element()
self.html = Element()
self.script = Element()
self.header._parent = self
self.html._parent = self
self.script._parent = self
if isinstance(html, Element):
self.html.add_child(html)
elif isinstance(html, text_type) or isinstance(html, binary_type):
self.html.add_child(Html(text_type(html)))
self.max_width = max_width
self._template = Template(u"""
var {{this.get_name()}} = L.popup({maxWidth: '{{this.max_width}}'});
{% for name, element in this.html._children.items() %}
var {{name}} = $('{{element.render(**kwargs).replace('\\n',' ')}}')[0];
{{this.get_name()}}.setContent({{name}});
{% endfor %}
{{this._parent.get_name()}}.bindPopup({{this.get_name()}});
{% for name, element in this.script._children.items() %}
{{element.render()}}
{% endfor %}
""") # noqa
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
for name, child in self._children.items():
child.render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.script.add_child(Element(
self._template.render(this=self, kwargs=kwargs)),
name=self.get_name())
class FitBounds(MacroElement):
"""Fit the map to contain a bounding box with the
maximum zoom level possible.
Parameters
----------
bounds: list of (latitude, longitude) points
Bounding box specified as two points [southwest, northeast]
padding_top_left: (x, y) point, default None
Padding in the top left corner. Useful if some elements in
the corner, such as controls, might obscure objects you're zooming
to.
padding_bottom_right: (x, y) point, default None
Padding in the bottom right corner.
padding: (x, y) point, default None
Equivalent to setting both top left and bottom right padding to
the same value.
max_zoom: int, default None
Maximum zoom to be used.
"""
def __init__(self, bounds, padding_top_left=None,
padding_bottom_right=None, padding=None, max_zoom=None):
super(FitBounds, self).__init__()
self._name = 'FitBounds'
self.bounds = json.loads(json.dumps(bounds))
options = {
'maxZoom': max_zoom,
'paddingTopLeft': padding_top_left,
'paddingBottomRight': padding_bottom_right,
'padding': padding,
}
self.fit_bounds_options = json.dumps({key: val for key, val in
options.items() if val},
sort_keys=True)
self._template = Template(u"""
{% macro script(this, kwargs) %}
{% if this.autobounds %}
var autobounds = L.featureGroup({{ this.features }}).getBounds()
{% endif %}
{{this._parent.get_name()}}.fitBounds(
{% if this.bounds %}{{ this.bounds }}{% else %}"autobounds"{% endif %},
{{ this.fit_bounds_options }}
);
{% endmacro %}
""") # noqa
|
the-stack_106_30090 | import datetime
from typing import Optional, List
from django.db.models import Prefetch
from essentials_kit_management.models \
import OrderItem, Form, Brand, Item, Section, User
from essentials_kit_management.interactors.storages.dtos \
    import FormDto, CompleteFormDetailsDto, OrderedItemDto, SectionDto, \
    ItemDto, BrandDto
from essentials_kit_management.constants.constants \
import DEFAULT_DATE_TIME_FORMAT
from essentials_kit_management.constants.enums import StatusEnum
class StorageImplementation():
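    """Django-ORM backed storage used by the interactors to read forms, sections,
    items, brands and ordered items, returning them as plain DTOs."""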
def get_forms_details_as_list(self, user_id: int) -> List[FormDto]:
user = User.objects.filter(id=user_id).first()
user_forms = user.forms.all()
print(user_forms)
forms_list_dtos = self._convert_forms_into_forms_list_dtos(user_forms)
return forms_list_dtos
def get_ordered_items_dtos(self) -> List[OrderedItemDto]:
ordered_items = OrderItem.objects.all()
ordered_item_dtos = \
self._convert_ordered_items_into_dtos_list(ordered_items)
return ordered_item_dtos
def get_form_details_dto(self, form_id: int) -> CompleteFormDetailsDto:
items_query = Item.objects.prefetch_related(
Prefetch('brands', to_attr='item_brands')
)
sections_query = Section.objects.prefetch_related(
Prefetch('items', queryset=items_query, to_attr='section_items')
)
form = Form.objects.prefetch_related(
Prefetch(
'sections', queryset=sections_query, to_attr='form_sections'
)
).get(id=form_id)
form_details_dto = self._convert_form_into_dto(form)
return form_details_dto
def validate_form_id(self, form_id: int) -> bool:
is_valid_form = Form.objects.filter(id=form_id).exists()
return is_valid_form
def get_ordered_items_dtos_of_form(
self, form_id: int) -> List[OrderedItemDto]:
items_query = Item.objects.prefetch_related(
Prefetch('ordered_items', to_attr='filtered_ordered_items')
)
sections_query = Section.objects.prefetch_related(
Prefetch('items', queryset=items_query, to_attr='section_items')
)
form = Form.objects.prefetch_related(
Prefetch(
'sections', queryset=sections_query, to_attr='form_sections'
)
).get(id=form_id)
form_sections = form.form_sections
items_of_form = self._get_items_in_all_section(form_sections)
ordered_items = self._filter_ordered_items_from_items(items_of_form)
print(ordered_items)
ordered_items_dtos = self._convert_ordered_items_into_dtos_list(
ordered_items
)
return ordered_items_dtos
def _convert_forms_into_forms_list_dtos(self, forms):
form_list_dtos = []
for form in forms:
is_valid_close_date = self._validate_datetime(form.close_date)
if is_valid_close_date:
is_form_closed = datetime.datetime.now() >= form.close_date
if is_form_closed:
self._update_close_date_and_status_of_form(form)
            form_dto = FormDto(
                form_id=form.id, form_name=form.name,
                form_description=form.description, form_status=form.status,
                close_date=form.close_date,
                expected_delivery_date=form.expected_delivery_date
            )
            form_list_dtos.append(form_dto)
return form_list_dtos
def _convert_ordered_items_into_dtos_list(self, ordered_items):
ordered_item_dtos = [
OrderedItemDto(
ordered_item_id=ordered_item.id,
user_id=ordered_item.user_id,
item_id=ordered_item.item_id,
brand_id=ordered_item.brand_id,
form_id=self._get_form_id_related_to_item(
ordered_item.item_id
),
item_price=self._get_item_price_related_to_brand(
ordered_item.brand_id
),
ordered_quantity=ordered_item.ordered_quantity,
delivered_quantity=ordered_item.delivered_quantity,
is_out_of_stock=ordered_item.is_out_of_stock
)
for ordered_item in ordered_items
]
return ordered_item_dtos
@staticmethod
def _get_form_id_related_to_item(item_id):
item = Item.objects.filter(
id=item_id
).select_related('section').first()
section = item.section
form_id = section.form_id
return form_id
@staticmethod
def _get_item_price_related_to_brand(brand_id):
brand = Brand.objects.filter(id=brand_id).first()
return brand.price
def _convert_form_into_dto(self, form):
form_sections = form.form_sections
section_items = self._get_items_in_all_section(form_sections)
item_brands = self._get_brands_in_all_items(section_items)
form_section_dtos = self._convert_sections_into_dtos(form_sections)
section_item_dtos = self._convert_items_into_dtos(section_items)
item_brand_dtos = self._convert_brands_into_dtos(item_brands)
complete_form_details_dto = CompleteFormDetailsDto(
form_id=form.id,
form_description=form.description,
section_dtos=form_section_dtos,
item_dtos=section_item_dtos,
brand_dtos=item_brand_dtos
)
return complete_form_details_dto
@staticmethod
def _convert_sections_into_dtos(sections):
section_dtos = [
SectionDto(
section_id=section.id,
form_id=section.form_id,
product_title=section.title,
product_description=section.description
)
for section in sections
]
return section_dtos
@staticmethod
def _convert_items_into_dtos(items):
item_dtos = [
ItemDto(
item_id=item.id,
section_id=item.section_id,
item_name=item.name,
item_description=item.description,
)
for item in items
]
return item_dtos
@staticmethod
def _convert_brands_into_dtos(brands):
brand_dtos = [
BrandDto(
brand_id=brand.id,
item_id=brand.item_id,
brand_name=brand.name,
item_price=brand.price,
min_quantity=brand.min_quantity,
max_quantity=brand.max_quantity
)
for brand in brands
]
return brand_dtos
@staticmethod
def _get_items_in_all_section(sections):
items = []
for section in sections:
items_of_section = section.section_items
items = items + items_of_section
return items
@staticmethod
def _get_brands_in_all_items(items):
brands = []
for item in items:
brands_of_item = item.item_brands
brands = brands + brands_of_item
return brands
@staticmethod
def _filter_ordered_items_from_items(items):
ordered_items = OrderItem.objects.filter(item_id__in=items)
return ordered_items
@staticmethod
def _update_close_date_and_status_of_form(form):
form.close_date = None
form.status = StatusEnum.CLOSED.value
form.save()
@staticmethod
def _validate_datetime(datetime_obj):
is_invalid_datetime = datetime_obj is None
is_valid_datetime = not is_invalid_datetime
return is_valid_datetime
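# Usage sketch (the form id below is a placeholder):
#   storage = StorageImplementation()
#   if storage.validate_form_id(form_id=1):
#       form_dto = storage.get_form_details_dto(form_id=1)
#       ordered = storage.get_ordered_items_dtos_of_form(form_id=1)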
|
the-stack_106_30094 | # -*- coding: utf-8 -*-
# GUI Application automation and testing library
# Copyright (C) 2006-2019 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Linux AtspiElementInfo class"""
from .atspi_objects import AtspiAccessible, AtspiComponent, AtspiStateEnum, AtspiAction, AtspiValue, \
IATSPI
from ..element_info import ElementInfo
class AtspiElementInfo(ElementInfo):
"""Search class and hierarchy walker for AT-SPI elements"""
atspi_accessible = AtspiAccessible()
re_props = ["class_name", "name", "control_type"]
exact_only_props = ["handle", "pid", "control_id", "visible", "enabled", "rectangle",
"framework_id", "framework_name", "atspi_version", "runtime_id", "description"]
search_order = ["handle", "control_type", "class_name", "pid", "control_id",
"visible", "enabled", "name", "rectangle",
"framework_id", "framework_name", "atspi_version", "runtime_id", "description"]
# "auto_id", "full_control_type"
assert set(re_props + exact_only_props) == set(search_order)
renamed_props = {
"title": ("name", None),
"title_re": ("name_re", None),
"process": ("pid", None),
"visible_only": ("visible", {True: True, False: None}),
"enabled_only": ("enabled", {True: True, False: None}),
"top_level_only": ("depth", {True: 1, False: None}),
}
def __init__(self, handle=None):
"""Create element by handle (default is root element)"""
if handle is None:
self._handle = self.atspi_accessible.get_desktop(0)
else:
self._handle = handle
# Cache non-mutable element IDs
self._pid = self.atspi_accessible.get_process_id(self._handle, None)
self._root_id = self.atspi_accessible.get_id(self._handle, None)
self._runtime_id = self.atspi_accessible.get_index_in_parent(self._handle, None)
def __get_elements(self, root, tree, **kwargs):
tree.append(root)
for el in root.children(**kwargs):
self.__get_elements(el, tree, **kwargs)
def __hash__(self):
"""Return a unique hash value based on the element's handle"""
return hash((self._pid, self._root_id, self._runtime_id))
def __eq__(self, other):
"""Check if two AtspiElementInfo objects describe the same element"""
if not isinstance(other, AtspiElementInfo):
return False
if self.control_type == "Application" and other.control_type == "Application":
return self.process_id == other.process_id
return self.rectangle == other.rectangle
def __ne__(self, other):
"""Check if two AtspiElementInfo objects describe different elements"""
return not (self == other)
@staticmethod
def _get_states_as_string(states):
string_states = []
for i, state in AtspiStateEnum.items():
if states & (1 << i):
string_states.append(state)
return string_states
@property
def handle(self):
"""Return the handle of the window"""
return self._handle
@property
def name(self):
"""Return the text of the window"""
return self.atspi_accessible.get_name(self._handle, None).decode(encoding='UTF-8')
@property
def control_id(self):
"""Return the ID of the window"""
return self.atspi_accessible.get_role(self._handle, None)
@property
def runtime_id(self):
"""Return the runtime ID of the element"""
return self._runtime_id
@property
def process_id(self):
"""Return the ID of process that controls this window"""
return self._pid
pid = process_id
@property
def class_name(self):
"""Return the class name of the element"""
role = self.atspi_accessible.get_role_name(self._handle, None)
return "".join([part.capitalize() for part in role.decode("utf-8").split()])
@property
def rich_text(self):
"""Return the text of the element"""
return self.name
@property
def control_type(self):
"""Return the class name of the element"""
role_id = self.atspi_accessible.get_role(self._handle, None)
try:
return IATSPI().known_control_type_ids[role_id]
except KeyError:
raise NotImplementedError('Unknown role ID has been retrieved: {0}'.format(role_id))
@property
def parent(self):
"""Return the parent of the element"""
if self == AtspiElementInfo():
return None
return AtspiElementInfo(self.atspi_accessible.get_parent(self._handle, None))
def children(self, **kwargs):
"""Return children of the element"""
process = kwargs.get("process", None)
class_name = kwargs.get("class_name", None)
title = kwargs.get("title", None)
control_type = kwargs.get("control_type", None)
cnt = self.atspi_accessible.get_child_count(self._handle, None)
childrens = []
for i in range(cnt):
child = AtspiElementInfo(self.atspi_accessible.get_child_at_index(self._handle, i, None))
if class_name is not None and class_name != child.class_name:
continue
if title is not None and title != child.rich_text:
continue
if control_type is not None and control_type != child.control_type:
continue
if process is not None and process != child.process_id:
continue
childrens.append(child)
return childrens
@property
def component(self):
component = self.atspi_accessible.get_component(self._handle)
return AtspiComponent(component)
def descendants(self, **kwargs):
"""Return descendants of the element"""
tree = []
for obj in self.children(**kwargs):
self.__get_elements(obj, tree, **kwargs)
depth = kwargs.get("depth", None)
tree = self.filter_with_depth(tree, self, depth)
return tree
def description(self):
return self.atspi_accessible.get_description(self._handle, None).decode(encoding='UTF-8')
def framework_id(self):
return self.atspi_accessible.get_toolkit_version(self._handle, None).decode(encoding='UTF-8')
def framework_name(self):
return self.atspi_accessible.get_toolkit_name(self._handle, None).decode(encoding='UTF-8')
def atspi_version(self):
return self.atspi_accessible.get_atspi_version(self._handle, None).decode(encoding='UTF-8')
def get_layer(self):
"""Return rectangle of element"""
if self.control_type == "Application":
return self.children()[0].get_layer()
return self.component.get_layer()
def get_order(self):
if self.control_type == "Application":
return self.children()[0].get_order()
return self.component.get_mdi_z_order()
def get_state_set(self):
val = self.atspi_accessible.get_state_set(self.handle)
return self._get_states_as_string(val.contents.states)
def get_action(self):
if self.atspi_accessible.is_action(self.handle):
return AtspiAction(self.atspi_accessible.get_action(self.handle))
else:
return None
def get_value_property(self):
return AtspiValue(self.atspi_accessible.get_value(self.handle))
@property
def visible(self):
states = self.get_state_set()
if self.control_type == "Application":
children = self.children()
if children:
states = children[0].get_state_set()
else:
return False
return "STATE_VISIBLE" in states and "STATE_SHOWING" in states and "STATE_ICONIFIED" not in states
def set_cache_strategy(self, cached):
"""Set a cache strategy for frequently used attributes of the element"""
pass # TODO: implement a cache strategy for atspi elements
@property
def enabled(self):
states = self.get_state_set()
if self.control_type == "Application":
states = self.children()[0].get_state_set()
return "STATE_ENABLED" in states
@property
def rectangle(self):
"""Return rectangle of element"""
if self.control_type == "Application":
            # The Application object has no rectangle of its own; it is just a fake
            # container holding base application info such as the process ID and window
            # name. Return the application frame rectangle instead.
return self.children()[0].rectangle
return self.component.get_rectangle(coord_type="screen")
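# Usage sketch (requires a running AT-SPI registry, e.g. inside a GNOME session):
#   root = AtspiElementInfo()                       # the desktop element
#   for app in root.children(control_type="Application"):
#       print(app.name, app.process_id, app.rectangle)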
|
the-stack_106_30095 | from .random_flip import RandomFlip, Flip
from .random_affine import RandomAffine, Affine
from .random_anisotropy import RandomAnisotropy
from .random_elastic_deformation import (
RandomElasticDeformation,
ElasticDeformation,
)
__all__ = [
'RandomFlip',
'Flip',
'RandomAffine',
'Affine',
'RandomAnisotropy',
'RandomElasticDeformation',
'ElasticDeformation',
]
|
the-stack_106_30096 | # Create a "spring" using the rotational extrusion filter.
#
import pyvista
profile = pyvista.Polygon(center=[1.25, 0.0, 0.0], radius=0.2,
normal=(0, 1, 0), n_sides=30)
extruded = profile.extrude_rotate(resolution=360, translation=4.0,
dradius=.5, angle=1500.0)
extruded.plot(smooth_shading=True)
#
# Create a "wine glass" using the rotational extrusion filter.
#
import numpy as np
points = np.array([[-0.18, 0, 0],
[-0.18, 0, 0.01],
[-0.18, 0, 0.02],
[-0.01, 0, 0.03],
[-0.01, 0, 0.04],
[-0.02, 0, 0.5],
[-0.05, 0, 0.75],
[-0.1, 0, 0.8],
[-0.2, 0, 1.0]])
spline = pyvista.Spline(points, 30)
extruded = spline.extrude_rotate(resolution=20)
extruded.plot(color='tan')
|
the-stack_106_30098 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unicodecsv as csv
from django.conf.urls import url
from django.contrib import messages
from django.forms.forms import pretty_name
from django.forms.models import inlineformset_factory, modelform_factory
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.html import format_html
from .forms import ImportCSVForm
class UploadCSVAdminMixin(object):
change_form_template = 'admin/inline_csv_importer/change_form.html'
def get_urls(self):
urls = super(UploadCSVAdminMixin, self).get_urls()
my_urls = [
url(
r'^(\d+)/import-inline-csv/$',
self.import_inline_csv,
name='import-inline-csv'
),
]
return my_urls + urls
def format_csv_inline(self):
""" Outputs formatted csv_inline. """
        csv_inline = {'name': self.csv_inline[0][0]}
        csv_inline.update(self.csv_inline[0][1])
        return csv_inline
def do_checks(self):
"""
        Do some checks to make sure that the defined tuple or list is in the right format.
"""
message = None
if not hasattr(self, 'csv_inline'):
message = format_html(
'Please define <b>csv_inline</b> if you want import from csv.'
)
elif not isinstance(self.csv_inline[0], (list, tuple)):
message = format_html(
'{}.csv_inline must be list or tuple.'.format(self.__class__.__name__)
)
elif len(self.csv_inline) > 1:
message = format_html(
'{}.csv_inline can\'t be more than one set.'.format(self.__class__.__name__)
)
elif not self.csv_inline[0][1].get('inline'):
message = format_html(
'{}.csv_inline please define <b>inline</b>.'.format(self.__class__.__name__)
)
return message
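    # Hedged illustration (not from the original project): the checks above expect a
    # declaration on the ModelAdmin of roughly this shape, where PriceInline and the
    # field names are hypothetical placeholders:
    #
    #     csv_inline = (
    #         ('Prices', {
    #             'inline': PriceInline,          # an InlineModelAdmin providing .model
    #             'fields': ('date', 'value'),    # CSV columns mapped to model fields
    #             'help_text': 'Upload a CSV with date and value columns.',
    #         }),
    #     )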
def get_inline_model_form(self):
""" Build model form for inline model. """
return modelform_factory(
model=self.pretty_csv_inline['inline'].model,
fields=self.pretty_csv_inline['fields']
)
def build_formset(self, model_form, extra=0):
""" Build formset. """
formset = inlineformset_factory(
parent_model=self.model,
model=self.pretty_csv_inline['inline'].model,
form=model_form,
extra=extra,
)
return formset
def import_inline_csv(self, request, obj_id):
form = None
formset = None
initial_data = []
headers = []
# Do checks on defined csv_inline fieldset.
message = self.do_checks()
if message:
messages.error(request, message)
return HttpResponseRedirect('../')
self.pretty_csv_inline = self.format_csv_inline()
opts = {
'verbose_name': self.model._meta.verbose_name,
'verbose_name_plural': self.model._meta.verbose_name_plural,
'app_label': self.model._meta.app_label,
'object_name': self.model._meta.model_name,
}
confirmed = request.POST.get('confirmed', False)
if request.method == 'POST':
# Build inline formset.
model_form = self.get_inline_model_form()
if request.FILES.get('csv_file'):
csv_file = request.FILES['csv_file']
csv_file = csv.reader(csv_file)
# Skip headers
next(csv_file, None)
# Make headers pretty.
                headers = list(map(pretty_name, self.pretty_csv_inline['fields']))
for row in csv_file:
# Zip values from csv row to defined fields in csv_inline
zipped_data = dict(zip(self.pretty_csv_inline['fields'], row))
initial_data.append(zipped_data)
# Build formset.
formset = self.build_formset(model_form, extra=len(initial_data))
formset = formset(initial=initial_data)
else:
formset = self.build_formset(model_form)
formset = formset(request.POST)
if formset.is_valid():
obj = self.get_object(request, obj_id)
formset.instance = obj
formset.save()
messages.success(request, 'Imported successfully.')
return HttpResponseRedirect('../')
else:
form = ImportCSVForm()
if self.pretty_csv_inline.get('help_text'):
form['csv_file'].help_text = self.pretty_csv_inline['help_text']
return render_to_response(
'admin/inline_csv_importer/inline_csv_importer.html',
{
'title': 'Import data',
'root_path': 'admin',
'app_label': opts['app_label'],
'opts': opts,
'form': form,
'confirmed': confirmed,
'formset': formset,
'headers': headers,
'initial_data': initial_data,
},
RequestContext(request)
)
|
the-stack_106_30101 | import os
import numpy
from PyQt5 import QtWidgets, QtGui, QtCore
from cryspy_editor.b_rcif_to_cryspy import L_ITEM_CLASS, L_LOOP_CLASS, L_DATA_CLASS
from .FUNCTIONS import show_info, get_layout_method_help, make_qtablewidget_for_data_constr, show_widget, add_mandatory_optional_obj
from cryspy.common.cl_item_constr import ItemConstr
from cryspy.common.cl_loop_constr import LoopConstr
def w_for_data_constr(obj, layout_11, layout_12, layout_13, layout_2, layout_3, w_output, thread):
layout_11.addWidget(QtWidgets.QLabel("Data name:"))
_l_e_dist = QtWidgets.QLineEdit()
_l_e_dist.setText(obj.data_name)
_l_e_dist.editingFinished.connect(lambda : setattr(obj, "data_name", _l_e_dist.text()))
layout_11.addWidget(_l_e_dist)
layout_11.addWidget(QtWidgets.QLabel("Defined attributes:"))
_widg_table = make_qtablewidget_for_data_constr(obj, w_output, thread)
layout_11.addWidget(_widg_table)
_b_info = QtWidgets.QPushButton("info")
_b_info.clicked.connect(lambda : show_info(obj, w_output))
layout_11.addWidget(_b_info)
lay_left_2 = add_mandatory_optional_obj(obj, w_output, thread)
layout_12.addLayout(lay_left_2)
lay_left_3 = get_layout_method_help(obj, w_output, thread)
layout_13.addLayout(lay_left_3)
_text_edit = QtWidgets.QTextEdit()
_text_edit.setText(obj.to_cif())
layout_2.addWidget(_text_edit)
return
def create_obj(widg, obj):
ls_out = ["Enter the item:"]
l_class = obj.mandatory_classes+obj.optional_classes
n_mandatory = len(obj.mandatory_classes)
l_h = []
for _class in l_class:
        # l_class holds classes (not instances), and l_h stores (prefix, class) pairs.
        if issubclass(_class, ItemConstr):
            l_h.append((_class.PREFIX, _class))
        elif issubclass(_class, LoopConstr):
            l_h.append((_class.ITEM_CLASS.PREFIX, _class))
ls_out.extend([f"{_i+1}: {_h[0]:}" for _i, _h in enumerate(l_h)])
text, ok = QtWidgets.QInputDialog.getText(widg, 'Input Dialog',
"\n".join(ls_out))
if not(ok):
return None
_ind = int(text)-1
item_class = l_h[_ind][1]
item = item_class()
if _ind >= n_mandatory:
obj.optional_objs.append(item)
else:
obj.mandatory_objs.append(item)
def create_items(widg, obj):
ls_out = ["Enter the item:"]
ls_out.extend([f"{_i+1}: {_item.PREFIX:}" for _i, _item in enumerate(L_ITEM_CLASS)])
text, ok = QtWidgets.QInputDialog.getText(widg, 'Input Dialog',
"\n".join(ls_out))
if not(ok):
return None
_ind = int(text)-1
item_class = L_ITEM_CLASS[_ind]
item = item_class()
if not(item_class in obj.mandatory_classes+obj.optional_classes):
obj.optional_classes.append(item_class)
obj.optional_objs.append(item)
def create_loop(widg, obj):
ls_out = ["Enter the loop:"]
ls_out.extend([f"{_i+1}: {_item.ITEM_CLASS.PREFIX:}" for _i, _item in enumerate(L_LOOP_CLASS)])
text, ok = QtWidgets.QInputDialog.getText(widg, 'Input Dialog',
"\n".join(ls_out))
if not(ok):
return None
_ind = int(text)-1
loop_class = L_LOOP_CLASS[_ind]
loop = loop_class()
if not(loop_class in obj.mandatory_classes+obj.optional_classes):
obj.optional_classes.append(loop_class)
obj.optional_objs.append(loop)
|
the-stack_106_30102 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
import pandas as pd
import argparse
import os
import time
import sys
import pwd
import csv
import re
import deepchem
import pickle
import dcCustom
from dcCustom.molnet.preset_hyper_parameters import hps
from dcCustom.molnet.run_benchmark_models import model_regression, model_classification
from dcCustom.molnet.check_availability import CheckFeaturizer, CheckSplit
def load_metz(featurizer = 'Weave', cross_validation=False, test=False, split='random',
reload=True, K = 5, mode = 'regression', predict_cold = False, cold_drug=False,
cold_target=False, split_warm=False, filter_threshold=0, prot_seq_dict=None):
  # When test is False and cross_validation is False, the data is only split into training and validation sets.
if cross_validation:
assert not test
if mode == 'regression' or mode == 'reg-threshold':
mode = 'regression'
tasks = ['metz']
file_name = "restructured_unique.csv"
elif mode == 'classification':
tasks = ['metz_bin']
file_name = "restructured_bin.csv"
data_dir = "metz_data/"
if reload:
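    # Encode the splitting options (filtering threshold, cold/warm splits, cross-validation)
    # into the cache sub-directory name so differently prepared datasets do not collide on disk.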
delim = "/"
if filter_threshold > 0:
delim = "_filtered" + delim
if predict_cold:
delim = "_cold" + delim
elif split_warm:
delim = "_warm" + delim
elif cold_drug:
delim = "_cold_drug" + delim
elif cold_target:
delim = "_cold_target" + delim
if cross_validation:
delim = "_CV" + delim
save_dir = os.path.join(data_dir, featurizer + delim + mode + "/" + split)
loaded, all_dataset, transformers = dcCustom.utils.save.load_cv_dataset_from_disk(
save_dir, K)
else:
save_dir = os.path.join(data_dir, featurizer + delim + mode + "/" + split)
loaded, all_dataset, transformers = deepchem.utils.save.load_dataset_from_disk(
save_dir)
if loaded:
return tasks, all_dataset, transformers
dataset_file = os.path.join(data_dir, file_name)
if featurizer == 'Weave':
featurizer = dcCustom.feat.WeaveFeaturizer()
elif featurizer == 'ECFP':
featurizer = dcCustom.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = dcCustom.feat.ConvMolFeaturizer()
loader = dcCustom.data.CSVLoader(
tasks = tasks, smiles_field="smiles", protein_field = "proteinName",
source_field = 'protein_dataset', featurizer=featurizer, prot_seq_dict=prot_seq_dict)
dataset = loader.featurize(dataset_file, shard_size=8192)
if mode == 'regression':
transformers = [
deepchem.trans.NormalizationTransformer(
transform_y=True, dataset=dataset)
]
elif mode == 'classification':
transformers = [
deepchem.trans.BalancingTransformer(transform_w=True, dataset=dataset)
]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': dcCustom.splits.RandomSplitter(split_cold=predict_cold, cold_drug=cold_drug,
cold_target=cold_target, split_warm=split_warm, prot_seq_dict=prot_seq_dict,
threshold=filter_threshold),
'scaffold': deepchem.splits.ScaffoldSplitter(),
'butina': deepchem.splits.ButinaSplitter(),
'task': deepchem.splits.TaskSplitter()
}
splitter = splitters[split]
if test:
train, valid, test = splitter.train_valid_test_split(dataset)
all_dataset = (train, valid, test)
if reload:
deepchem.utils.save.save_dataset_to_disk(save_dir, train, valid, test,
transformers)
elif cross_validation:
fold_datasets = splitter.k_fold_split(dataset, K)
all_dataset = fold_datasets
if reload:
dcCustom.utils.save.save_cv_dataset_to_disk(save_dir, all_dataset, K, transformers)
else:
# not cross validating, and not testing.
train, valid, test = splitter.train_valid_test_split(dataset, frac_valid=0.2,
frac_test=0)
all_dataset = (train, valid, test)
if reload:
deepchem.utils.save.save_dataset_to_disk(save_dir, train, valid, test,
transformers)
return tasks, all_dataset, transformers |
the-stack_106_30104 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
class MaskedFill(Module):
def __init__(self, value) -> None:
super().__init__()
self.value = value
self._where_op = (
flow.builtin_op("where")
.Input("condition")
.Input("x")
.Input("y")
.Output("out")
.Build()
)
def forward(self, input, mask):
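        # Build a tensor shaped like `input`, fill it with the constant value, then let the
        # where op pick the fill value where `mask` is set and the original input elsewhere.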
in_shape = tuple(input.shape)
value_like_x = flow.Tensor(*in_shape, device=input.device)
value_like_x.fill_(self.value)
return self._where_op(mask, value_like_x, input)[0]
@oneflow_export("masked_fill")
@register_tensor_op("masked_fill")
@experimental_api
def masked_fill_op(tensor, mask, value):
r"""
Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is True.
The shape of :attr:`mask` must be broadcastable with the shape of the underlying tensor.
Args:
mask (BoolTensor): the boolean mask
value (float): the value to fill in with
For example:
.. code-block:: python
>>> import oneflow.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> in_arr = np.array(
... [[[-0.13169311, 0.97277078, 1.23305363, 1.56752789],
... [-1.51954275, 1.87629473, -0.53301206, 0.53006478],
... [-1.38244183, -2.63448052, 1.30845795, -0.67144869]],
... [[ 0.41502161, 0.14452418, 0.38968 , -1.76905653],
... [ 0.34675095, -0.7050969 , -0.7647731 , -0.73233418],
... [-1.90089858, 0.01262963, 0.74693893, 0.57132389]]]
... )
>>> fill_value = 8.7654321 # random value e.g. -1e9 3.1415
>>> input = flow.Tensor(in_arr, dtype=flow.float32)
>>> mask = flow.Tensor((in_arr > 0).astype(np.int8), dtype=flow.int)
>>> output = flow.masked_fill(input, mask, fill_value)
# tensor([[[-0.1317, 8.7654, 8.7654, 8.7654],
# [-1.5195, 8.7654, -0.533 , 8.7654],
# [-1.3824, -2.6345, 8.7654, -0.6714]],
# [[ 8.7654, 8.7654, 8.7654, -1.7691],
# [ 8.7654, -0.7051, -0.7648, -0.7323],
# [-1.9009, 8.7654, 8.7654, 8.7654]]], dtype=oneflow.float32)
"""
return MaskedFill(value)(tensor, mask)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
|
the-stack_106_30105 | """
Copyright (C) 2017-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from math import ceil
# Concat infer : N - number of inputs to concat
# axis - dimension number for tensors concatenation
import numpy as np
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
class CorrelationOp(Op):
op = 'Correlation'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': __class__.op,
'op': __class__.op,
'version': 'extension',
'in_ports_count': 1,
'out_ports_count': 1,
'infer': CorrelationOp.corr_infer
}
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
return [
'pad',
'kernel_size',
'max_displacement',
'stride_1',
'stride_2',
'single_direction',
'do_abs',
'correlation_type'
]
@staticmethod
def corr_infer(node: Node):
outn = node.out_node(0)
inn = node.in_node(0)
outn.shape = np.zeros(4, dtype=int)
outn.shape[0] = inn.shape[0]
bottomchannels = inn.shape[1]
paddedbottomheight = inn.shape[2]
paddedbottomwidth = inn.shape[3] + 2 * node.pad
        kernel_radius_ = (node.kernel_size - 1) / 2
        border_size_ = node.max_displacement + kernel_radius_
        outn.shape[3] = ceil(float(paddedbottomwidth - border_size_ * 2) / node.stride_1)
        outn.shape[2] = ceil(float(paddedbottomheight - kernel_radius_ * 2) / node.stride_1)
neighborhood_grid_radius_ = node.max_displacement / node.stride_2
if node.single_direction != 0:
neighborhood_grid_width_ = neighborhood_grid_radius_ + 1
else:
neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1
outn.shape[1] = neighborhood_grid_width_ * neighborhood_grid_width_
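        # Hedged worked example (illustrative numbers only): for an input of shape
        # [1, 256, 48, 64] with pad=20, kernel_size=1, max_displacement=20, stride_1=1,
        # stride_2=2 and single_direction=0, the code above gives
        #   kernel_radius_ = 0, border_size_ = 20,
        #   paddedbottomwidth  = 64 + 40 = 104 -> out width  = ceil((104 - 40) / 1) = 64,
        #   paddedbottomheight = 48            -> out height = ceil((48 - 0) / 1)   = 48,
        #   neighborhood_grid_width_ = 21      -> out channels = 21 * 21 = 441,
        # i.e. outn.shape == [1, 441, 48, 64].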
|
the-stack_106_30106 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
persistent=False,
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
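        # Shape note (added for clarity): with hidden_size=768 and 12 attention heads this
        # reshapes (batch, seq_len, 768) into (batch, 12, seq_len, 64).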
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.BertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
            logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True`.")
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> config.is_decoder = True
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
        self.bert = BertModel(config, add_pooling_layer=False)  # the MLM head does not use the pooled output
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
            hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
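        Example (an illustrative sketch; the ``bert-base-uncased`` checkpoint and the label value
        used here are assumptions, not something defined in this file)::
            >>> from transformers import BertTokenizer, BertForSequenceClassification
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs, labels=torch.tensor([1]))
            >>> loss = outputs.loss
            >>> logits = outputs.logits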
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
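        Example (an illustrative sketch; the checkpoint name and the example sentences are
        assumptions, not something defined in this file)::
            >>> from transformers import BertTokenizer, BertForMultipleChoice
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
            >>> prompt = "In Italy, pizza is traditionally eaten"
            >>> choice0 = "with a fork and a knife."
            >>> choice1 = "while held in the hand."
            >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True)
            >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=torch.tensor(0).unsqueeze(0))
            >>> logits = outputs.logits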
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
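        Example (an illustrative sketch; the checkpoint name is an assumption, not something
        defined in this file)::
            >>> from transformers import BertTokenizer, BertForTokenClassification
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForTokenClassification.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> predicted_token_classes = outputs.logits.argmax(-1)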
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
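        Example (an illustrative sketch; the checkpoint name and the question/context strings are
        assumptions, not something defined in this file)::
            >>> from transformers import BertTokenizer, BertForQuestionAnswering
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')
            >>> question, context = "Where is HuggingFace based?", "HuggingFace is based in NYC."
            >>> inputs = tokenizer(question, context, return_tensors='pt')
            >>> outputs = model(**inputs)
            >>> start_logits = outputs.start_logits
            >>> end_logits = outputs.end_logits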
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
the-stack_106_30108 | # Code taken from
# https://github.com/vn-ki/anime-downloader
# All rights to Vishnunarayan K I
import base64
import sys
from hashlib import md5
from Cryptodome import Random
from Cryptodome.Cipher import AES
from requests.utils import quote
BLOCK_SIZE = 16
#KEY = b"LXgIVP&PorO68Rq7dTx8N^lP!Fa5sGJ^*XK"
KEY = b"267041df55ca2b36f2e322d05ee2c9cf"
# From stackoverflow https://stackoverflow.com/questions/36762098/how-to-decrypt-password-from-javascript-cryptojs-aes-encryptpassword-passphras
def pad(data):
length = BLOCK_SIZE - (len(data) % BLOCK_SIZE)
return data + (chr(length)*length).encode()
def unpad(data):
return data[:-(data[-1] if type(data[-1]) == int else ord(data[-1]))]
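# Worked example of the PKCS#7-style padding above (illustrative, not part of the
# original downloader code): with BLOCK_SIZE = 16, pad(b"hi") appends fourteen bytes
# of value 0x0e, and unpad strips them again, so unpad(pad(data)) == data.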
def bytes_to_key(data, salt, output=48):
# extended from https://gist.github.com/gsakkis/4546068
assert len(salt) == 8, len(salt)
data += salt
key = md5(data).digest()
final_key = key
while len(final_key) < output:
key = md5(key + data).digest()
final_key += key
return final_key[:output]
def decrypt(encrypted, passphrase):
encrypted = base64.b64decode(encrypted)
assert encrypted[0:8] == b"Salted__"
salt = encrypted[8:16]
key_iv = bytes_to_key(passphrase, salt, 32+16)
key = key_iv[:32]
iv = key_iv[32:]
aes = AES.new(key, AES.MODE_CBC, iv)
return unpad(aes.decrypt(encrypted[16:]))
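# For reference, a matching OpenSSL-style encrypt routine (an illustrative sketch, not part
# of the original downloader code) would produce the base64("Salted__" + salt + ciphertext)
# layout that decrypt() expects:
#
#   def encrypt(plaintext, passphrase):
#       salt = Random.new().read(8)
#       key_iv = bytes_to_key(passphrase, salt, 32 + 16)
#       key, iv = key_iv[:32], key_iv[32:]
#       aes = AES.new(key, AES.MODE_CBC, iv)
#       return base64.b64encode(b"Salted__" + salt + aes.encrypt(pad(plaintext)))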
def decrypt_export(url):
decrypt_ed = decrypt((url).encode('utf-8'), KEY).decode('utf-8').lstrip(' ')
escap_ed = quote(decrypt_ed, safe='~@#$&()*!+=:;,.?/\'')
return escap_ed
if __name__ == '__main__':
if sys.argv:
if len(sys.argv[1:]) > 1:
# sending a file_name as the argument
# e.g: python3 decrypt.py file_name.txt anything ...
file_name = sys.argv[1]
with open(file_name) as fn:
for l in fn.readlines():
decrypt_ed = decrypt(l.encode('utf-8'), KEY).decode('utf-8').lstrip(' ')
# https://stackoverflow.com/a/6618858/8608146
escap_ed = quote(decrypt_ed, safe='~@#$&()*!+=:;,.?/\'')
print(escap_ed)
elif len(sys.argv[1:]) == 1:
decrypt_ed = decrypt((sys.argv[1]).encode('utf-8'), KEY).decode('utf-8').lstrip(' ')
escap_ed = quote(decrypt_ed, safe='~@#$&()*!+=:;,.?/\'')
print(escap_ed)
|
the-stack_106_30109 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 19:14:37 2019
@author: george
"""
import sys
import dill
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
warnings.filterwarnings("ignore", category=DeprecationWarning)
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from scipy.stats import multivariate_normal
from sklearn.cluster import KMeans
#from cvxopt import matrix
#from cvxopt.solvers import qp
from sklearn.linear_model import LogisticRegression
import random as rdm
import time
####THIS CODE HAS NUMERICAL ISSUES AT THE SPARCS DATASET
class SupervisedGMM():
"""
THIS CLASS IMPLEMENTS THE SUPERVISED GMM ALGORITHM
IT USES SOME PARTS OF SCIKIT LEARN TO ACCOMPLISH THIS
"""
def __init__(self, max_iter = 1000, cv = 10, mix = 0.2,
C = [20,50,200,500,2000,5000],
alpha = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000 ],
max_iter2 = 10, penalty = 'l1', scoring = 'neg_log_loss',
solver = 'saga', n_clusters = 2, tol = 10**(-3 ) ,
mcov = 'diag', tol2 = 10**(-3), transduction = 0, adaR = 1,
verbose = 0, warm = 0, m_sparse = 0, m_sparseL = 10**(-3),
m_sp_it1 = 2, m_sp_it2 = 2, m_choice = 0,
m_LR = 0.001, m_mix = 1, altern = 0, log_reg = 'LG'):
""" MODEL PARAMETERES:
            max_iter: [INT] number of epochs for the SGD classifier: Default 1000
            cv: [INT] number of cross-validation folds: Default 10
            mix: [FLOAT] fraction of the previous memberships kept when mixing
                 with the newly computed memberships: Default 0.2
            C: [LIST] inverse regularization strengths for the logistic
                regression cross validation: Default [20, 50, 200, 500, 2000, 5000]
alpha:[LIST] regularization parameters, for the stochastic
gradient descend cross validation
max_iter2: Maximum # of EM Iterations, DEF: 10
penalty:[STRING] Regularization type ( Default L1 )
scoring:[STRING] score to optimize in cross validation: DEF:
'negative_log_loss
solver: [STRING] DEF: 'saga', Solvers used by scikit learn
for logistic Regression
n_clusters:{INTEGER] #of Soft Clusters: DEF: 2
tol:[FLOAT] memberships convergence tolerance
tol2 =[FLOAT] stochastic gradient descent tolerance: Def 10^(-3)
mcov =[STRING] 'full' or 'diag', 'full' means full covariance,
'diag' means diagonal covariance
            transduction: [BINARY]
                Whether to use transductive logic (requires test data in fit): Default: 0 (off)
adaR:[BINARY] Adaptive regularization , regularize according to the
cluster size
verbose:[BINARY] information on the fitting of the algorithm and
other information
warm:[BINARY], if this is 1 we need to give during the fitting the
memberships, warm start, given memberships
m_sparse:{BINARY] take sparse means in the gaussians centers or not
m_sparseL: [FLOAT] the lambda for the means regularization
            m_sp_it1: iterations to run the first sparse-means algorithm (QP) if
                chosen
            m_sp_it2: iterations to run the second sparse algorithm (gradient descent) if chosen
            m_choice: [BINARY] choice of sparse-means algorithm: 0 --> QP, 1 --> gradient descent
            m_LR: if gradient descent is chosen, the learning rate to use
m_mix:
altern: [BINARY] start using prediction driven approach when
memberships have converged with just mixture models
log_reg: [STRING], "SGD" stochastic gradient descend,
"LG" Logistic Regression
"""
######################################################################
# CLASS ATTRIBUTES
######################################################################
#ind1 and ind2 are optional feature selection parameters that might
#be specified in the fit method
self._ind1 = None
self._ind2 = None
#idx1, idx2 are indexes created if we do a split and explained in
#the split method
self._idx1 = None
self._idx2 = None
#maximum number of epochs for SGD
self._max_iter = max_iter
#Number of Folds for Cross Validation
self._cv = cv
#Mixing Coefficient
self._mix = mix
#NOT USED ANY MORE
self._C = C
#List with regularization parameters for cross validation
self._alpha = alpha
#Nuber of iterations of the EM algorithm
self._max_iter2 = max_iter2
#type of penalty for logistic regression
self._penalty = penalty
#type of scoring for cross validation
self._scoring = scoring
#type of scikit learn solver for SGD
self._solver = solver
#nimber of clusters to use
self._n_clusters = n_clusters
#tolerance for the SGD agorithm
self._tol = tol
#tolerance for the membership convergence
self._tol2 = tol2
#variable for the type pf covariance for the gaussians
self._mcov = mcov
#use transuction or not
self._trans = transduction
#use adaptive regularization of not
self._adaR = adaR
#verbose or not
self._vb = verbose
#warm : warm start or not
self._warm = warm
self._m_sparse = m_sparse
self._m_sparseL = m_sparseL
self._m_sp_it1 = m_sp_it1
self._m_sp_it2 = m_sp_it2
self._m_choice = m_choice
self._m_LR = m_LR
self._m_mix = m_mix
self._altern = altern
self._log_reg = log_reg
#FOR FIT INITIALIZE WITH KMEANS THE MEMBERSHIPS
self._KMeans = None
######################################################################
#THE FOLLOWING ATTRIBUTES ARE SETTED AFTER FITTING THE ALGORITHM
#TO DATA
#PARAMETER TO BE SETTED AFTER THE MODELS IS FITTED
#A list of the Gaussians After fitting the data
#they can be used to predict memebership of a new data points
self.Gmms = None
#when we fit a mixture of bernulis the means of the bernullis
self.Bers = None
#mixture coefficients of Gaussians or/and Bernullis (to do )
self.mixes = None
self.mixesB = None
#A list of Logistic regression predictors one for each class
self.LogRegr = None
#PARAMETERS OF THE GAUSSIANS MODELS
#list of covariances matrixes, list of means, list of mixes
#probability matrix "Gauss membershipfor train and test if test iused
#Gmms list of Gaussian predictors from Scipy Class
self.params = None
#PARAMETERS AFTER FITTING THE MODEL
#fitParams = {'mTrain' : mTrain, 'mTest': mTest, 'labTest': testlabels,
# 'labTrain' : trainlabels }
#memberships for training data and testing data
#hard cluster labels for training and testing data
self.fitParams = None
#GAUSSIAN'S MEANS AND COVARINACES
self.means = None
self.cov = None
#LOGISTIC REGRESSION WEIGHTS
self.weights = None
#TRAIN AND TEST MEMBERSHIPS SOFT
self.mTrain = None
self.mTest = None
#IF MODEL IS FITTED OR NOT
self.fitted = None
######################################################################
#TO DO
# INCORPORATE BERNULLIS AND GAUSSIANS TOGETHER IN THE SAME FIT
# FUNCTION
#GIVE THE BINARY DATA COLUMNS AND USE THESE FOR BERNULLIS AND THE
#REST WITH GAUSSIANS
#######################################################################
#HELPER
@classmethod
def loader(cls, filename):
"""
Initialize former model
"""
with open(filename, "rb") as f:
return dill.load(f)
def save(self, filename='sgmm_model_%s.pkl' % time.strftime("%Y%m%d-%H%M%S")):
"""
Save current model for future use
"""
with open(filename, "wb") as f:
return dill.dump(self, f)
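    # Illustrative usage sketch (the synthetic data and parameter values below are
    # assumptions, not part of the original file):
    #
    #   from sklearn.datasets import make_classification
    #   X, y = make_classification(n_samples=500, n_features=10, random_state=0)
    #   sgmm = SupervisedGMM(n_clusters=2, max_iter2=5, transduction=0)
    #   Xtr, Xte, ytr, yte = sgmm.split(X=X, y=y, split=0.2)
    #   sgmm.fit(Xtrain=Xtr, ytrain=ytr, kmeans=1)
    #   probs = sgmm.predict_proba(Xte)       # P(y = 1 | x) under the fitted model
    #   memberships = sgmm.predict_GMMS(Xte)  # soft cluster memberships of the test points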
def split(self, data = None, X = None, y = None, split = 0.2):
"""
A helper function to split data into training and test set
There are 2 choices, either Input a data numpy Array with the last
column be its labels or The data and the labels separately
data: Data with last column labels
X: Data
y: labels
split: the percentage of test data
        returns: Xtrain, Xtest, ytrain, ytest
                 (the split row indexes are stored on the object as self.idx1
                  for the training rows and self.idx2 for the test rows)
"""
# if (data and X and y ) is None:
# return "Input data or X and y "
if (X is None) and (y is None):
Xtrain, Xtest, ytrain, ytest , idx1, idx2 = \
train_test_split(data[:,:-1], data[:,-1],
np.arange( data.shape[0] ),
test_size = split, random_state = 1512,
                                 stratify = data[:, -1])
else:
Xtrain, Xtest, ytrain, ytest, idx1, idx2 = \
train_test_split(X, y,
np.arange( X.shape[0] ),
test_size = split, random_state = 1512,
stratify = y)
self.idx1 = idx1
self.idx2 = idx2
return Xtrain, Xtest, ytrain.astype(int), ytest.astype(int)
def fit(self, Xtrain = None, ytrain = None, Xtest = None, ind1 = None,
ind2 = None, mTrain1 = None, mTest1 = None,
kmeans = 0, mod = 1, simple = 0, comp_Lik = 0,
memb_mix = 0.0, hard_cluster = 0):
"""
Fit the Supervised Mixtures of Gaussian Model
        ind1: choose the feature columns to use when training the ML (logistic
              regression) models
        ind2: choose the feature columns to use when training the Gaussians
        Xtrain: training data
        ytrain: labels of training data
        Xtest: testing data (required when transduction is on)
kmeans: kmeans initialization of memberships
mod: mode of computing the probabilities for gaussians, default
mod = 1
simple : binary variable to decide if you will use simple
mixture of gaussians plus classification [simple = 1], if
simple is 0 [simple = 0] then use prediction driven gaussians
for training ( the proposed model )
a third choice is use simple = 0 and set the altern variable from
the model to 1 this will use no prediction driven results till one
point and then it will alter to prediction driven
comp_Lik: (UNSTABLE) Compute Likelihood or not
memb_mix: parameter on how to mix the supervised along with the
memberships
hard_cluster: hard cluster memberships before the logistic regression
fitting.
"""
#CHECK IF ALL DATA ARE GIVEN
self.ind1 = ind1
self.ind2 = ind2
self.fitted = 1
self._KMeans = kmeans
if Xtrain is None or ytrain is None :
print(" Please Give Xtrain, ytrain, Xtest data ")
return
if ind1 is None:
#ALL FEATURES FOR PREDICTION IF ind1 NOT SPECIFIED
self.ind1 = np.arange( Xtrain.shape[1] )
ind1 = self.ind1
if ind2 is None:
            #ALL FEATURES FOR CLUSTERING IF ind2 NOT SPECIFIED
self.ind2 = np.arange( Xtrain.shape[1] )
ind2 = self.ind2
#PARAMETERES TO BE USED BY THE FIT FUNCTION
n_clusters = self._n_clusters
max_iter = self._max_iter
cv = self._cv
mix = self._mix
penalty = self._penalty
scoring = self._scoring
solver = self._solver
max_iter2 = self._max_iter2
trans = self._trans
C = self._C
alpha = self._alpha
tol = self._tol
tol2 = self._tol2
mcov = self._mcov
adaR = self._adaR
vb = self._vb
warm = self._warm
altern = self._altern
lg_regr = self._log_reg
dimXtrain = Xtrain.shape[0]
dimXtest = 0
if trans == 1:
dimXtest = Xtest.shape[0]
#regularize the sums for numerical instabilities
reg = 10**(-5)
#regularization to be added to every memebership entry
regk = reg/n_clusters
#INITIALIZE MEMBERSHIP FUNCTIONS
#WE KEEP SEPARATED TRAIN AND TEST MEMBERSHIPS
#BECAUSE TRAIN IS SUPERVISED MEMBERSHIP
#TEST IS UNSUPERVISED
mTrain, mTest = self.initializeMemb(warm, kmeans, dimXtrain, n_clusters,
regk, trans, simple, dimXtest, Xtrain, Xtest, mTrain1,
mTest1)
#NORMALIZE MEMBERSHIPS SO EACH ROW SUMS TO 1
sumTrain = np.sum( mTrain, axis = 1)
mTrain = ( mTrain.T / sumTrain ).T
if trans == 1:
sumTest = np.sum( mTest, axis = 1 )
mTest = ( mTest.T / sumTest ).T
#SET SOME PARAMETERES FOR USE IN THE FOR LOOP OF EM
indexing = np.arange( dimXtrain )
#MATRIX WITH LOGISTIC REGRESSION PROBABILITIES FOR EACH CLASS
logiProb = np.zeros([dimXtrain, n_clusters])
#MATRIX WITH LOG PROBABILITIES
logLogist = np.zeros([dimXtrain, n_clusters])
#variable related to the altern parametere
gate = 0
###START FITTING ALGORITHM ##################
#setting the cross validation grid
if lg_regr == 'SGD':
param_grid = {'alpha': alpha}
else:
param_grid = {'C': C}
Qold = 0 #initial likelihood (if we are to calculate it)
#STARTING EM ALGORITHM
for iter2 in np.arange( max_iter2 ): #OUTER EM ITERATIONS
#FITING THE L1 LOGISTIC REGRESSIONS
if simple == 0:
models, logiProb, logLogist = self.fitLogisticRegression(
n_clusters, mTrain, adaR, alpha, max_iter,
tol2, Xtrain, ytrain, vb, penalty, scoring,
cv, regk, ind1, indexing, logiProb, logLogist,
param_grid, lg_regr, C,
hard_cluster = hard_cluster )
else: #IF WE USE SIMPLE MIXTURES OF GAUSSIANS JUST FIT AT LAST ITER
if iter2 == ( max_iter2 - 1):
models, logiProb, logLogist = self.fitLogisticRegression(
n_clusters, mTrain, adaR, alpha, max_iter,
tol2, Xtrain, ytrain, vb, penalty, scoring,
cv, regk, ind1, indexing, logiProb, logLogist,
param_grid, lg_regr, C, hard_cluster = hard_cluster )
#WE TAKE THE MEMBERSHIPS AND ALL THE DATA
#TO FIT THE GAUSSIANS USING THE EM ALGORITHM FOR GMM
if trans == 1: #if we hve transduction
data = np.concatenate((Xtrain[:, ind2], Xtest[:, ind2]),
axis = 0)
mAll = np.concatenate( (mTrain, mTest ), axis = 0 )
else:
data = Xtrain[:, ind2]
mAll = mTrain
#take the parameters of the GMM models
#THIS PIECE OF CODE WILL BE REMOVED IN THE FUTURE
params = self.gmmModels( data, mAll, mcov )
gmmProb = params['probMat']
###################################################################
#THIS IS AFTER MODIFICATIONS#######################################
if mod == 1: #THIS IS THE MOD WE WILL KEEP IN THE FUTURE
gmmProb = params['probMat2']
gmmLogprob = params['logProb']
self.Gmms = params['Gmms']
self.mixes = params['pis']
#END OF MODIFICATION ##############################################
#CALCULATE NEW MEMBERSHIPS FOR TRAIN AND TEST
if simple and gate == 0: #NO PREDICTION DRIVEN (MoG + LR + L1)
mNewTrain = gmmProb[0: dimXtrain, :] + regk
else: #PREDICTION DRIVEN (SGMM)
mNewTrain = logiProb * gmmProb[0: dimXtrain, :] + regk
simple = 0
###################################################################
if trans:
mNewTest = gmmProb[dimXtrain :, :] + regk
#COMPUTE LIKELIHOOD IF COMP_LIK == 1
if mod and comp_Lik:
Qnew, Qdif = self.computeLikelihood( gmmLogprob, logLogist,
dimXtrain, vb, trans, simple, Qold)
Qold = Qnew
#END OF MODIFICATION ##############################################
#NORMALIZE NEWMEMBERSHIPS
sumTrain = np.sum( mNewTrain, axis = 1)
if trans == 1:
sumTest = np.sum( mNewTest, axis = 1 )
mNewTrain = ( mNewTrain.T / sumTrain ).T
if trans == 1:
mNewTest = ( mNewTest.T / sumTest ).T
#EVALUATE ERROR
errorTr = np.sum( np.abs( mTrain - mNewTrain) )
if trans == 1:
errorTst = np.sum( np.abs( mTest - mNewTest ) )
error = ( errorTr + errorTst )/( (dimXtrain + dimXtest)\
*n_clusters )
else:
error = errorTr/( dimXtrain * n_clusters )
if (error < 5*10**(-8)) and altern:
gate = 1
altern = 0
#MAKE A SOFT CHANGE IN MEMEBRSHIPS MIXING OLD WITH NEW
#MEMBERSHIPS WITH DEFAULT MIXING OF 0.5
mNewTrain = mNewTrain*(1-memb_mix) + \
self.predict_GMMS(Xtrain)*memb_mix
mTrain = mNewTrain*(1-mix) + mTrain*(mix)
if trans == 1:
mTest = mNewTest*(1-mix) + mTest*(mix)
print("GMM iteration: {}, error: {}".format(iter2, error))
if error < tol:
break
############ END OF EM UPDATES #######################################
#if simple and error < tol:
models, logiProb, logLogist = self.fitLogisticRegression(
n_clusters, mTrain, adaR, alpha, max_iter,
tol2, Xtrain, ytrain, vb, penalty, scoring,
cv, regk, ind1, indexing, logiProb, logLogist,
param_grid, lg_regr, C,
hard_cluster = hard_cluster )
self.Gmms = params['Gmms']
self.mixes = params['pis']
self.LogRegr = models
self.params = params
#TAKING HARD CLUSTERS IN CASE WE WANT TO USE LATER
if trans == 1:
testlabels = np.argmax( mTest, axis = 1 )
else:
testlabels = []
trainlabels = np.argmax( mTrain, axis = 1 )
fitParams = {'mTrain' : mTrain, 'mTest': mTest, 'labTest': testlabels,
'labTrain' : trainlabels }
self.mTrain = mTrain
if trans == 1:
self.mTest = mTest
self.fitParams = fitParams
#set the weights of LOGREG MEANS AND COVARIANCES OF GAUSSIANS
self.setWeights()
self.setGauss( params )
return self
#END OF FIT FUNCTION##################################################
def initializeMemb( self, warm, kmeans, dimXtrain, n_clusters,
regk, trans, simple, dimXtest, Xtrain, Xtest, mTrain1,
mTest1):
""" Function to initialize memberships,
warm: if we want a warm start ((provide mTrain1, mTest1))
kmeans: [binary] kmeans initialization or not
dimXtrain: number of training data
n_clusters: number of clusters we use
regk: amount of regularization for the divisions
trans: use transduction or not (if yes we need test data too )
simple: prediction driven or not
if we have trunsduction give the dimension of test data
Xtrain: training data
Xtest: testing data
mTrain1: given thatwe want a warm start give the initial memeberhsips
mTest1: given that we want a warm start give the initial memeberships
of test data
"""
if warm == 0: #IF WE DONT HAVE WARM START
if kmeans == 0: #NO KMEANS INITIALIZATION (RANDOM INIT)
mTrain = np.random.rand( dimXtrain, n_clusters) + regk
if trans == 1:
mTest = np.random.rand( dimXtest, n_clusters ) + regk
else:
mTest = []
else: #KMEANS INITIALIZATION
km = KMeans( n_clusters = n_clusters )
if simple == 0:
rdm.seed(0)
randsamp_train = rdm.sample(range(Xtrain.shape[0]),np.int(Xtrain.shape[0]*0.5))
if trans == 1:
#FIT KMEANS IN DATA (TEST AND TRAIN IF TRANSDUCTION)
randsamp_test = rdm.sample(range(Xtest.shape[0]),np.int(Xtest.shape[0]*0.5))
Xtot = np.concatenate( (Xtrain[randsamp_train,:] , Xtest[randsamp_test,:]), axis = 0)
km = km.fit( Xtot )
mAll = np.zeros([ dimXtrain +dimXtest, n_clusters])
labels = km.predict (np.concatenate( (Xtrain , Xtest), axis = 0))
else:
#FIT ONLY TRAIN IF NOT TRANSDUCTION, with 50% data
km.fit( Xtrain [randsamp_train,:])
mAll = np.zeros([ dimXtrain , n_clusters])
labels = km.predict ( Xtrain )
else:
if trans == 1:
#FIT KMEANS IN DATA (TEST AND TRAIN IF TRANSDUCTION)
km = km.fit( np.concatenate( (Xtrain, Xtest), axis = 0))
mAll = np.zeros([ dimXtrain +dimXtest, n_clusters])
else:
#FIT ONLY TRAIN IF NOT TRANSDUCTION
km.fit( Xtrain)
mAll = np.zeros([ dimXtrain , n_clusters])
#TAKE THE LABELS FROM KMEANS
labels = km.labels_
for j in np.arange( labels.shape[0] ): #MAKE THE MEMBERSHIPS
mAll[j, labels[j]] = 1
mTrain = mAll[0: dimXtrain ]
if trans == 1:
mTest = mAll[ dimXtrain :]
else:
mTest = []
else: #IF WE HAVE WARM START, ASSIGN WITH THE GIVEN MEMBERSHIPS
mTrain = mTrain1
mTest = mTest1
return mTrain, mTest
################### FITTING LOGISTIC REGRESSION MODEL #########################
def fitLogisticRegression(self, n_clusters, mTrain, adaR, alpha, max_iter,
tol2, Xtrain, ytrain, vb, penalty, scoring,
cv, regk, ind1, indexing, logiProb, logLogist,
param_grid, lg_regr, C, hard_cluster):
""" FIT LOGISTIC REGRESSION FOR EACH CLUSTER
n_clusters: number of gaussians -- clusters
mTrain: train data membership,
adaR: to use or not adaptive regularization
alpha: regularization parameteres list
max_iter : number of epochs to train the stochastic gradient
descend algorithm
tol2: tolerance of SGD training
Xtrain: training data
ytrain: training labels
vb: to print some info at eout related to adaptive regularization
such us cluster size, new alphas etc
penalty: penalty to use for training , default L1 norm
scoring: scoring to use for training , Default neg log loss
cv: number of folds for cross validation
regk: regularization when computing log probabilities
ind1: indexes to use for training (feature columns)
indexing: a list with the indexes of the training data
logiProb: an initialized matrix to put the logistic regression
probabilities
logLogist: an initialized matrix to put the log probabilities
lg_regr: Choice of SGD or FULL Logistic Regression
C: regularization for logistic regression
hard_cluster: hard_cluster memebrships before the fit of
logistic regressions
returns: models-> logistic regresion models
logiProb--> probabilities of a data point to belong in
in its class given the cluster
logLogist--> the same as above but log probabilities
"""
mTrain = self.hardCluster( mTrain.copy(), hard_cluster)
models = []
for clust in np.arange( n_clusters ): #FITLOG REGR
#FIT THE L1 LOGISTIC REGRESSION MODEL
#CROSS VALIDATION MAXIMIZING BE DEFAULT THE NEGATIVE LOG LIKEHOOD
#ADAPTIVE REGULARIZATION
Nclus = np.sum( mTrain[:, clust], axis = 0 )
if adaR == 1:
if lg_regr == 'SGD':
alphanew = (np.array( alpha ) / Nclus).tolist()
param_grid = {'alpha': alphanew}
else:
Cnew = (np.array( C ) / Nclus ).tolist()
param_grid = {'C': Cnew}
# PRINT SOME INFORMATION
if vb == 1:
#print Cluster Size
print('\n Cluster {} has Size {} of {}'.format( clust,
Nclus, mTrain.shape[0]))
if adaR == 1:
if lg_regr == 'SGD':
print('alpha is {} alphaNew {}'.format(alpha, alphanew))
else:
print('C is {} CNew {}'.format(C, Cnew))
#TRAIN LOGISTIC REGRESSION MODEL
if lg_regr == 'SGD':
mf = SGDClassifier(loss = "log", penalty = penalty,
n_jobs = -1, max_iter = max_iter,
random_state = 0, tol = tol2)
else:
mf = LogisticRegression( penalty = penalty, tol = tol2,
random_state = 0,
max_iter = max_iter, n_jobs = -1)
model = GridSearchCV( mf, param_grid = param_grid,
n_jobs = -1,
scoring = scoring, cv = cv).\
fit(Xtrain, ytrain,
sample_weight = mTrain[:, clust] )
#FOR EACH CLUSTER APPEND THE MODEL in MODELS
models.append( model )
#PREDICT PROBABILITIES FOR BEING IN CLASS 1 or 0
proba = model.predict_proba( Xtrain[:, ind1] )
#log probabilities
logproba = np.log( proba + regk)
#FOR EACH DATA POINT TAKE THE PROB ON BEING IN CORRECT CLASS
logiProb[:, clust] = proba[ indexing, ytrain ]
logLogist[:, clust] = logproba[ indexing, ytrain]
######## END OF CODE FITTING LOGISTIIC REGRESSION ############
return models, logiProb, logLogist
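    # Note on the adaptive regularization used above (adaR == 1): the cross-validation grid is
    # rescaled by the soft cluster size N_k = sum_i r_ik, i.e. alpha_k = alpha / N_k for the SGD
    # classifier and C_k = C / N_k for LogisticRegression, so the penalty is tied to how much
    # weighted data each cluster-specific classifier actually sees.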
################ COMPUTE LIKELIHOOD ##########################################
def computeLikelihood( self, gmmLogprob, logLogist, dimXtrain, vb, trans,
simple, Qold):
"""COMPUTER THE AUXILARY FUNCTION Q IN EACH ITERATION
gmmLogprob: The log probabilities for all clusters from Mixture
of Gaussians
logLogist: Log probabilities from logistic regressin
dimXtrain: Train Data Dimension
vb: verbose output,
trans: if trunsduction is used or not
simple: if we use the MoG or the SGMM
Qold: the previous calculated Q value
"""
dimXtest = gmmLogprob.shape[0] - dimXtrain
if trans == 0:
Qf = gmmLogprob + logLogist*(1-simple)
Qf2 = np.log( np.sum(np.exp( Qf ), axis = 1) )
Qf3 = np.sum( Qf2 )/dimXtrain
else:
Qft = gmmLogprob[0: dimXtrain,:] + logLogist*(1-simple)
Qf2 = np.log( np.sum(np.exp( Qft ), axis = 1) )
Qf31 = np.sum( Qf2 )
Qftest = gmmLogprob[dimXtrain:, :]
Qftest2 = np.log( np.sum(np.exp( Qftest ), axis = 1) )
Qftest3 = np.sum( Qftest2 )
Qf3 = (Qftest3 + Qf31)/(dimXtest + dimXtrain)
Qdif = abs( Qf3 - Qold)
if vb == 1:
print("\n Qnew is : {}".format( Qf3 ))
# print("\n Qdif is : {}".format( Qdif ))
return Qf3, Qdif
################# #FITTING THE GAUSSIAN MIXTURE MODEL #########################
def gmmModels(self, X, members, mcov ):
"""
Calculates the Mixtures of Gaussians Parameters
Calculates the Mixtures of Gaussians in the form of a list
of objects of Gaussians for each cluster
X : Train and Test data together
members: Posterior Probabibilities for each cluster
and each data point (memberships)
Returns: a list with the covariances matrices of the Gaussians,
a list with the mixing parameteres,
a list with the means of the gaussians,
the probability matrix with the posteriors for each data
point and each cluster,
a list with the Gaussians as Object
All these it returns in the form of a dictionary
"""
#GETTING THE NUMBER OF CLUSTERS " GAUSSIANS IN THE MIXTURE "
clusters = members.shape[1]
regk = (10**(-5)/clusters)
cov = [] #list with covariance matrices
means = [] #list of means
pis = [] #list of mixing coefficients
#MATRIX WITH THE PROBABILITIES p(x_i/z_i = k) For all gaussians
probMat = np.zeros( [X.shape[0], clusters] )
#THE ACTUAL MODELS FOR PREDICTION OF THE MEMBERSHIPS ON TEST POINTS
Gmms = []
#MATRIX WITH THE LOG PROBABILITIES
logprobaMatrix = np.zeros([X.shape[0], clusters])
for cl in np.arange( clusters ):
# FOR EACH CLUSTER USE THE EM ALGORITHM
# TO CREATE THE NEW MEMBERSHIP MATRIX OF THE GAUSSIANS
#IT IS NOT EXACTLY THE MEMBERSHIP BECAUSE IT IS
# NORMALIZED AFTER THIS FUNCTION ENDS
covCl, mCl, piCl, logproba, model = self.calcGmmPar( X,
members[:,cl],
mcov)
#LOG PROBABILITIES FOR EACH CLUSTER
logprobaMatrix[:,cl] = logproba
#APPEND GAUSSIAN STATS
cov.append( covCl )
means.append( mCl )
pis.append( piCl )
Gmms.append( model )
#FOR EACH DATA POINT FIND THE MAXIMUM LOGPROBABILITY
#THIS IS DONE FOR REGULARIZATION PURPOSES
maxLog = np.max( logprobaMatrix, axis = 1 )
logprobaMatrix2 = ( logprobaMatrix.T - maxLog).T
#### NEXT 4 LINES BEFORE WILL BE DELETED IN FUTURE
probMat = np.exp( logprobaMatrix2 ) + regk
sumRel = np.sum( probMat, axis = 1)
probMat = (probMat.T / sumRel).T
probMat = probMat*np.array(pis)
#THIS WILL BE KEPT IN THE FUTURE -->p(x/z_i)p(z_i)
probMat2 = np.exp( logprobaMatrix2 )*np.array( pis ) + regk
totLog = logprobaMatrix + np.log( np.array( pis ) )
params = {'cov':cov, 'means': means, 'pis' : pis,
'probMat':probMat, 'Gmms': Gmms, 'probMat2': probMat2,
'logProb': totLog}
return params
def calcGmmPar(self, X, memb, mcov):
"""CALCULATES PARAMETERS FOR EACH GAUSSIAN
#FOR EACH CLUSTER
#RETURNS:
#covk : covariance matrix of gaussian of class k
#meank : mean vector of gaussian of class k
#pk: mixing coefficient of gaussian of class k
#model : the Gaussian of class k (object)
#proba: the posterior probabilities, i.e probabilities of being
#in class k given X
"""
#if to use sparse means or not
sparse = self._m_sparse
#sparse means regularization lambda
lambd = self._m_sparseL
#alternating iterations for the QP program
sp_it1 = self._m_sp_it1
#gradient decend iterations
sp_it2 = self._m_sp_it2
#choice of sparse means algorithm 0 --> QP , 1 --> GD
choice = self._m_choice
#Learning rate for gradient descend
LR = self._m_LR
reg = 10**(-4) #regularization for Covariances
Nk = np.sum(memb) # + 1 #Cluster Population
N = X.shape[0] # + self._n_clusters #Number of data Points
#mixing coefficient
pk = Nk/N
meank = self.cMean(X, memb, Nk)
covk = self.cCov(X, meank, memb, reg, Nk, mcov)
if sparse == 1: #SPARSE MEANS IF IT HAS BEEN CHOSEN AS AN OPTION
if choice == 0: #QP
for i in np.arange( sp_it1) : #ITERATE THROUGH THE QP ALGORITHM
meank = self.spMeans(X, memb, covk, Nk, N, lambd)
covk = self.cCov(X, meank, memb, reg, Nk, mcov)
else: #GRADIENT DESCEND
meank, covk = self.m_GD(X, memb, meank, covk, Nk, N, lambd,
sp_it2, LR, reg, mcov)
model = multivariate_normal( meank.copy(), covk.copy() )
        logproba = model.logpdf(X)  #LOG PROBABILITY of X under this Gaussian
return covk, meank, pk, logproba, model
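    # The quantities computed above follow the standard weighted-GMM M-step
    # (with r_ik the membership of point i in cluster k):
    #   N_k  = sum_i r_ik
    #   pi_k = N_k / N
    #   mu_k = (1 / N_k) * sum_i r_ik * x_i
    #   S_k  = (1 / N_k) * (sum_i r_ik * (x_i - mu_k)(x_i - mu_k)^T + reg * I)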
################ SPARSE MEANS FUNCTIONS #######################################
############ UNDER CONSTRUCTION #######################################
def objective(self, X, Nk, meank, mean, cinv, lambd, covk):
t1 = Nk*0.5*np.linalg.det(cinv)
t2 = 0
for i in np.arange( X.shape[0] ):
t2 += -0.5*(np.expand_dims(X[i,:]-meank, axis = 0))@\
[email protected]_dims((X[i,:]-meank), axis = 1)
t3 = -lambd*np.linalg.norm( mean - meank, ord = 1)
obj = t1+t2+t3
return obj
def m_GD(self, X, memb, meank, covk, Nk, N, lambd, sp_it2, LR, reg, mcov):
#Gradient Descend algorithm
mean = np.sum( X, axis = 0)/N
print( mean.shape )
# print(mean)
cinv = np.linalg.pinv( covk )
for i in np.arange( sp_it2 ): #GRADIENT DESCEND LOOP
#cinv = np.linalg.pinv( covk )
# obj = self.objective(X, Nk, meank, mean, cinv, lambd, covk)
#print( obj )
mTerm1 = np.sum( (memb*(X-meank).T).T, axis = 0)
# mTerm1 = np.expand_dims( mTerm1, axis = 1)
#print(mTerm1.shape)
mnew = meank + LR*( cinv@mTerm1-lambd*( -np.sign( mean - meank)))
# cTerm2 = -0.5*self.cCov(X, mnew, memb, reg, Nk, mcov)
# Snew = covk + LR*( 0.5*Nk*covk + cTerm2 )
meank = mnew
# covk = Snew
covk = self.cCov(X, mnew, memb, reg, Nk, mcov)
return meank, covk
def spMeans(self, X, memb, covk, Nk, N, lambd ):
""" Calculates the Sparse means by optimizing the l1 norm
X: data Matrix
memb: membership for Gaussian k
covk: covariance matrix
Nk: data in cluster k
N: data
lambd: regularization
"""
        # cvxopt is needed only for this QP-based sparse-means option; the
        # module-level imports are commented out, so import it lazily here
        from cvxopt import matrix
        from cvxopt.solvers import qp
        #Number of Features
Nf = X.shape[1]
#data mean
mean = np.expand_dims( np.sum(X, axis = 0)/N, axis = 1)
#inverse covariance
cinv = np.linalg.pinv( covk )
#Form P matrix [I*Nk 0]
zeros = np.zeros(shape = [Nf, Nf])
onesD = cinv*Nk
first = np.concatenate((onesD, zeros), axis = 1)
second = np.concatenate(( zeros, zeros ), axis = 1)
P = np.concatenate(( first, second), axis = 0)
Po = matrix( P )
#form q [coef 1^T *lambda]
print(memb.shape, X.shape)
wsum = np.expand_dims( np.sum( (memb*X.T).T, axis = 0), axis = 1 )
fq = ( -wsum.T@cinv).T
sq = np.ones(shape = [Nf, 1] )*lambd
q = np.concatenate( (fq, sq), axis = 0)
qo = matrix( q )
#form G
eye = np.eye(Nf)
firstG = np.concatenate( ( eye, -eye ), axis = 1)
secondG = np.concatenate(( -eye, -eye), axis = 1)
thirdG = np.concatenate( (zeros, -eye), axis = 1)
G = np.concatenate( (firstG, secondG, thirdG), axis = 0)
Go = matrix( G )
#forming matrix h
zerosh = np.zeros(shape = [Nf, 1] )
h = np.concatenate(( mean, -mean, zerosh))
ho = matrix( h )
slv = qp(Po, qo, G = Go, h = ho)
meank = np.array( slv['x'] )
meank = np.squeeze( meank[0:Nf], axis = 1)
print(meank)
return meank
######### END OF SPARSE MEANS FUNCTIONS #######################################
########## HELPER FUNCTIONS FOR MEANS AND COVARIANCES #########################
def cMean(self, X, memb, Nk):
"""calculates the weighted mean for gaussian k"""
meank = np.sum( ( X.T * memb ).T, axis = 0) / Nk
return meank
def cCov(self, X, meank, memb, reg, Nk, mcov):
"""Given a data Matrix X, its weighted mean, the membership
vector, a regularization parameter the type of covariance, full or diagonal
and the weighted sample size
calculates the weighted covariance matrix for gaussian k,
"""
if mcov == 'full':
covk = (memb*( X - meank ).T)@ ( X - meank) \
+ np.eye(X.shape[1])*reg
else:#diagonal covariance
covk = np.sum( memb*( np.square( X-meank ).T ), axis = 1 )
covk = np.diag( covk ) + np.eye(X.shape[1])*reg
covk = covk/Nk
return covk
########### END OF HELPER FUNCTIONS FOR MEANS AND COVARIANCES #################
### PREDICTIONS #############################################################
def predict_prob_int(self, Xtest = None, Xtrain = None):
"""
AFTER FITTING THE MODEL, PREDICTS THE PROBABILITIES OF TRAIN AND TEST
DATA TO BE 1, USING THE MEMBERSHIPS THAT HAVE BEEN CALCULATED DURING
TRAINING
"""
#CHECKING IF THERE IS TRANSUCTION
trans = self._trans
if trans == 1:
if self.mTest is None:
                print("The model is not fitted or some other error might have occurred")
return
logisticModels = self.LogRegr #TAKE LOGISTIC REGRESSION MODELS
if trans == 1:
pMatrixTest = np.zeros( (Xtest.shape[0]) )
pMatrixTrain = np.zeros( (Xtrain.shape[0]) )
#FOR EACH MODEL CALCULATE THE PREDICTION FOR EACH DATA POINT
for i, model in enumerate( logisticModels ):
#probability each test point
#to be in class 1
if trans == 1:
probsTest = model.predict_proba( Xtest )[:,1]
pMatrixTest += probsTest*self.mTest[:, i]
#probability each training point
#to be in class 1
probsTrain = model.predict_proba(Xtrain)[:,1]
pMatrixTrain += probsTrain*self.mTrain[:, i]
if trans == 0:
pMatrixTest = self.predict_proba(Xtest)
return pMatrixTest, pMatrixTrain
    def predict_proba(self, X = None):
        """Predicts the probability of each sample in X belonging to class 1."""
models = self.LogRegr
memb = self.predict_GMMS( X ) #PREDICT MEMBERSHIP OF X
totalProb = np.zeros( [X.shape[0]])
for i in np.arange( memb.shape[1] ):
#probability points of X belong in class 1
model = models[i]
probModel = model.predict_proba( X )
proba = probModel[:, 1]
# totalProb += models[i].predict_proba( X )[:, 1]*memb[:, i]
totalProb += proba*memb[:, i]
return totalProb
def predict_GMMS( self, X):
"""
Given a Data matrix X it returns the Membership matrix
for each data point in X based on the Gaussians already fitted
"""
        if not self.fitted:
print("Warning: There is no fitted model ")
return []
gmms = self.Gmms
mixes = self.mixes
regk = 10**(-5)/len( gmms )
membership = np.zeros( [X.shape[0], len( gmms )] )
logmembership = np.zeros( [X.shape[0], len( gmms )] )
for i in np.arange( len( gmms ) ):
logmembership[:, i] = gmms[i].logpdf( X[:, self.ind2] )#*mixes[i]
maxlog = np.max( logmembership, axis = 1)
logmembership = (logmembership.T - maxlog).T
probMat = np.exp( logmembership )* np.array( mixes ) + regk
sumRel = np.sum( probMat, axis = 1)
membership = (probMat.T / sumRel).T
return membership
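    # The memberships returned above are the Gaussian posteriors
    #   p(z = k | x) = pi_k * N(x | mu_k, S_k) / sum_j pi_j * N(x | mu_j, S_j)
    # evaluated in log space with a max-log shift (and a small regularizer regk) for
    # numerical stability.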
def getweightsL1(self, models ):
"""GIVEN THE LOGISTIC REGRESSION MODELS,
RETURN THE SUM OF THE WEIGHTS PLUS REGULARIZATION """
sumW = 0
for i, model in enumerate( models ):
weights = model.best_estimator_.coef_.tolist()[0]
alphDict = model.best_params_
alph = alphDict['alpha']
weights = np.array( weights )
weights = np.abs( weights )
sumW += alph*np.sum(weights)
return -sumW
def setWeights( self ):
""" setting logistic regression weights for each cluster """
        if self.fitted is None:
            print("MODEL IS NOT FITTED YET")
            return
models = self.LogRegr
weights = []
for model in models:
weight = model.best_estimator_.coef_.tolist()[0]
intercept = model.best_estimator_.intercept_[0]
weight.insert(0, intercept)
weights.append( weight )
self.weights = weights
return
def setGauss( self, params ):
#SETTING MEANS AND COVARIANCES OF THE GAUSSIANS
        if self.fitted is None:
            print("MODEL IS NOT FITTED YET")
            return
self.means = params['means']
self.cov = params['cov']
self.pis = params['pis']
return
def hardCluster( self, mTrain, hard_cluster):
"""takes the memeberships assigns 1 at the max element of each row
and 0 to all the other elements of the row
"""
if hard_cluster:
mTrain2 = np.zeros_like( mTrain )
mTrain2[ np.arange(len(mTrain)), np.argmax( mTrain, axis = 1)] = 1
return mTrain2
return mTrain |
the-stack_106_30110 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
from math import gcd
from collections import namedtuple
from typing import Callable, Tuple, Union
import numpy as np
from scipy import special
from scipy.stats import distributions
from ... import tensor as mt
from ...core import ExecutableTuple
from ...typing import TileableType
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
Ks_2sampResult = KstestResult
def _compute_prob_inside_method(m, n, g, h): # pragma: no cover
"""
Count the proportion of paths that stay strictly inside two diagonal lines.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The proportion of paths that stay inside the two lines.
Count the integer lattice paths from (0, 0) to (m, n) which satisfy
|x/m - y/n| < h / lcm(m, n).
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
    Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Probability is symmetrical in m, n. Computation below uses m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Count the integer lattice paths from (0, 0) to (m, n) which satisfy
# |nx/g - my/g| < h.
# Compute matrix A such that:
# A(x, 0) = A(0, y) = 1
# A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
# A(x, y) = 0 if |x/m - y/n|>= h
# Probability is A(m, n)/binom(m+n, n)
# Optimizations exist for m==n, m==n*p.
# Only need to preserve a single column of A, and only a sliding window of it.
# minj keeps track of the slide.
minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
curlen = maxj - minj
# Make a vector long enough to hold maximum window needed.
lenA = min(2 * maxj + 2, n + 1)
# This is an integer calculation, but the entries are essentially
# binomial coefficients, hence grow quickly.
# Scaling after each column is computed avoids dividing by a
# large binomial coefficient at the end, but is not sufficient to avoid
# the large dynamic range which appears during the calculation.
# Instead we rescale based on the magnitude of the right most term in
# the column and keep track of an exponent separately and apply
# it at the end of the calculation. Similarly when multiplying by
# the binomial coefficient
dtype = np.float64
A = np.zeros(lenA, dtype=dtype)
# Initialize the first column
A[minj:maxj] = 1
expnt = 0
for i in range(1, m + 1):
# Generate the next column.
# First calculate the sliding window
lastminj, lastlen = minj, curlen
minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
minj = min(minj, n)
maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
if maxj <= minj:
return 0
# Now fill in the values
A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj])
curlen = maxj - minj
if lastlen > curlen:
# Set some carried-over elements to 0
A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0
# Rescale if the right most value is over 2**900
val = A[maxj - minj - 1]
_, valexpt = math.frexp(val)
if valexpt > 900:
# Scaling to bring down to about 2**800 appears
# sufficient for sizes under 10000.
valexpt -= 800
A = np.ldexp(A, -valexpt)
expnt += valexpt
val = A[maxj - minj - 1]
# Now divide by the binomial (m+n)!/m!/n!
for i in range(1, n + 1):
val = (val * i) / (m + i)
_, valexpt = math.frexp(val)
if valexpt < -128:
val = np.ldexp(val, -valexpt)
expnt += valexpt
# Finally scale if needed.
return np.ldexp(val, expnt)
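# --- Illustrative consistency check (added for exposition; not part of the
# original module and not used anywhere). For equal sample sizes, the "inside"
# probability above is the complement of the "outside the square" probability
# computed by _compute_prob_outside_square below, mirroring how
# _attempt_exact_2kssamp chooses between the two for the two-sided case.
# The function name is an assumption.
def _example_inside_outside_consistency(n=5, h=3):  # pragma: no cover
    p_inside = _compute_prob_inside_method(n, n, n, h)
    p_outside = _compute_prob_outside_square(n, h)
    # the two quantities should agree up to floating point error
    return p_inside, 1.0 - p_outside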
def _compute_prob_outside_square(n, h): # pragma: no cover
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2a) + binom(2n, n-3a) - ... ) / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
def _count_paths_outside_method(m, n, g, h): # pragma: no cover
"""
Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that go low.
The calculation may overflow - check for a finite answer.
Raises
------
FloatingPointError: Raised if the intermediate computation goes outside
the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
    Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Not every x needs to be considered.
# xj holds the list of x values to be checked.
# Wherever n*x/m + ng*h crosses an integer
lxj = n + (mg-h)//mg
xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom() may return a float.
# Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i)) # pylint: disable=redefined-builtin
Bj -= bin * B[i]
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
def _attempt_exact_2kssamp(n1, n2, g, d, alternative): # pragma: no cover
"""Attempts to compute the exact 2sample probability.
n1, n2 are the sample sizes
g is the gcd(n1, n2)
d is the computed max difference in ECDFs
Returns (success, d, probability)
"""
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
return True, d, 1.0
saw_fp_error, prob = False, np.nan
try:
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = 1 - _compute_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
jrange = np.arange(h)
prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
else:
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1) # pylint: disable=redefined-builtin
if not np.isfinite(bin) or not np.isfinite(num_paths) or num_paths > bin:
saw_fp_error = True
else:
prob = num_paths / bin
except FloatingPointError:
saw_fp_error = True
if saw_fp_error:
return False, d, np.nan
if not (0 <= prob <= 1):
return False, d, prob
return True, d, prob
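# --- Small usage sketch (an addition for illustration only; not exercised by the
# module). For tiny equal samples, n1 = n2 = 5, g = gcd(5, 5) = 5 and an observed
# ECDF gap d = 0.6, the helper rounds d onto the lattice and returns the exact
# two-sided tail probability P(D >= d).
def _example_attempt_exact():  # pragma: no cover
    success, d, prob = _attempt_exact_2kssamp(5, 5, 5, 0.6, 'two-sided')
    return success, d, prob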
def _calc_prob_2samp(d, n1, n2, alternative, mode): # pragma: no cover
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -mt.inf
original_mode = mode
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int_).max / n2g:
mode = 'asymp'
warnings.warn(
f"Exact ks_2samp calculation not possible with samples sizes "
f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to mode={mode}.", RuntimeWarning)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptotic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
return np.clip(prob, 0, 1)
def _compute_dplus(cdfvals, n):
"""Computes D+ as used in the Kolmogorov-Smirnov test.
Parameters
----------
    cdfvals: array_like
        Sorted array of CDF values between 0 and 1
    n: int
        Sample size
Returns
-------
Maximum distance of the CDF values below Uniform(0, 1)
"""
return (mt.arange(1.0, n + 1) / n - cdfvals).max()
def _compute_dminus(cdfvals, n):
"""Computes D- as used in the Kolmogorov-Smirnov test.
Parameters
----------
    cdfvals: array_like
        Sorted array of CDF values between 0 and 1
    n: int
        Sample size
Returns
-------
Maximum distance of the CDF values above Uniform(0, 1)
"""
return (cdfvals - mt.arange(0.0, n) / n).max()
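# --- Worked example (added for illustration; the function below is an assumption
# and is not used by the library). For evenly spread CDF values the one-sided
# statistics reduce to simple maximum gaps against the grids i/n and (i - 1)/n.
def _example_dplus_dminus():  # pragma: no cover
    cdfvals = mt.tensor([0.1, 0.3, 0.5, 0.7, 0.9])
    n = 5
    dplus = _compute_dplus(cdfvals, n)    # max(i/n - F(x_i)) = 0.1
    dminus = _compute_dminus(cdfvals, n)  # max(F(x_i) - (i-1)/n) = 0.1
    return dplus, dminus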
def ks_1samp(x: Union[np.ndarray, list, TileableType],
cdf: Callable,
args: Tuple = (),
alternative: str = 'two-sided',
mode: str = 'auto'):
"""
Performs the one-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying distribution F(x) of a sample
against a given continuous distribution G(x). See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
x : array_like
a 1-D array of observations of iid random variables.
cdf : callable
callable used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used with `cdf`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice
the one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D- (depending on the value
of 'alternative')
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp, kstest
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import mars.tensor as mt
>>> from mars.tensor.stats import ks_1samp
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> x = mt.linspace(-15, 15, 9, chunk_size=5)
>>> ks_1samp(x, stats.norm.cdf).execute()
(0.44435602715924361, 0.038850142705171065)
>>> ks_1samp(stats.norm.rvs(size=100), stats.norm.cdf).execute()
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that `` CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> ks_1samp(x, stats.norm.cdf, alternative='less').execute()
KstestResult(statistic=0.235488541678..., pvalue=1.158315030683...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> ks_1samp(x, stats.norm.cdf, alternative='greater').execute()
KstestResult(statistic=0.010167165616..., pvalue=0.972494973653...)
    Don't reject null hypothesis in favor of alternative hypothesis: greater
>>> ks_1samp(x, stats.norm.cdf).execute()
KstestResult(statistic=0.235488541678..., pvalue=2.316630061366...)
    Reject null hypothesis in favor of alternative hypothesis: two-sided
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> ks_1samp(stats.t.rvs(100, size=100), stats.norm.cdf).execute()
KstestResult(statistic=0.077844250253..., pvalue=0.553155412513...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> ks_1samp(stats.t.rvs(3, size=100), stats.norm.cdf).execute()
KstestResult(statistic=0.118967105356..., pvalue=0.108627114578...)
"""
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
x = mt.asarray(x)
N = x.shape[0]
x = mt.sort(x)
cdfvals = x.map_chunk(cdf, args=args, elementwise=True)
if alternative == 'greater':
Dplus = _compute_dplus(cdfvals, N)
return ExecutableTuple(KstestResult(
Dplus, Dplus.map_chunk(distributions.ksone.sf, args=(N,))))
if alternative == 'less':
Dminus = _compute_dminus(cdfvals, N)
return ExecutableTuple(KstestResult(
Dminus, Dminus.map_chunk(distributions.ksone.sf, args=(N,))))
# alternative == 'two-sided':
Dplus = _compute_dplus(cdfvals, N)
Dminus = _compute_dminus(cdfvals, N)
D = mt.stack([Dplus, Dminus]).max()
if mode == 'auto': # Always select exact
mode = 'exact'
if mode == 'exact':
prob = D.map_chunk(distributions.kstwo.sf, args=(N,),
elementwise=True)
elif mode == 'asymp':
prob = (D * np.sqrt(N)).map_chunk(distributions.kstwobign.sf,
elementwise=True)
else:
# mode == 'approx'
prob = 2 * D.map_chunk(distributions.ksone.sf, args=(N,),
elementwise=True)
prob = mt.clip(prob, 0, 1)
return ExecutableTuple(KstestResult(D, prob))
def ks_2samp(data1: Union[np.ndarray, list, TileableType],
data2: Union[np.ndarray, list, TileableType],
alternative: str = 'two-sided',
mode: str = 'auto'):
"""
Compute the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution. The alternative hypothesis
can be either 'two-sided' (default), 'less' or 'greater'.
Parameters
----------
data1, data2 : array_like, 1-Dimensional
Two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided, see explanation in Notes
* 'greater': one-sided, see explanation in Notes
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
Two-tailed p-value.
See Also
--------
kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample KS test, the distribution is
assumed to be continuous.
In the one-sided test, the alternative is that the empirical
cumulative distribution function F(x) of the data1 variable is "less"
or "greater" than the empirical cumulative distribution function G(x)
of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.
If the KS statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
If the mode is 'auto', the computation is exact if the sample sizes are
less than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import mars.tensor as mt
>>> from mars.tensor.stats import ks_2samp
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> ks_2samp(rvs1, rvs2).execute()
KstestResult(statistic=0.20833333333333337, pvalue=5.1292795978041816e-05)
For a slightly different distribution, we cannot reject the null hypothesis
    at a 10% or lower alpha since the p-value at 0.147 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> ks_2samp(rvs1, rvs3).execute()
KstestResult(statistic=0.10333333333333333, pvalue=0.14691437867433788)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> ks_2samp(rvs1, rvs4).execute()
KstestResult(statistic=0.07999999999999996, pvalue=0.4115432028915931)
"""
if mode not in ['auto', 'exact', 'asymp']:
raise ValueError(f'Invalid value for mode: {mode}')
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError(f'Invalid value for alternative: {alternative}')
data1 = mt.asarray(data1)
data2 = mt.asarray(data2)
data1 = mt.sort(data1)
data2 = mt.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = mt.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = mt.searchsorted(data1, data_all, side='right') / n1
cdf2 = mt.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
minS = mt.clip(-mt.min(cddiffs), 0, 1) # Ensure sign of minS is not negative.
maxS = mt.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': mt.maximum(minS, maxS)}
d = alt2Dvalue[alternative]
prob = d.map_chunk(_calc_prob_2samp, args=(n1, n2, alternative, mode),
elementwise=True, dtype=d.dtype)
return ExecutableTuple(Ks_2sampResult(d, prob))
|
the-stack_106_30111 | # -*- coding: utf-8 -*-
##############################################
# The MIT License (MIT)
# Copyright (c) 2019 Kevin Walchko
# see LICENSE for full details
##############################################
from .helpers import read
from collections import namedtuple
LinuxInfo = namedtuple("LinuxInfo", "distro distro_pretty debian_based version version_codename")
def str2dict(ss, delim):
d = {}
o = ss.split("\n")
for line in o:
try:
s = line.split(delim)
# still may have some " characters to remove
d[s[0]] = s[1].replace('"', '')
except IndexError:
# sometimes there is an empty line, so you can't
# split '' into 2 substrings
continue
return d
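# Brief usage sketch (added for illustration; the sample text below is an
# assumption, not a real /etc/os-release). str2dict turns KEY=VALUE lines into a
# dict and strips surrounding double quotes from the values.
def _example_str2dict():
    sample = 'ID=ubuntu\nID_LIKE=debian\nPRETTY_NAME="Ubuntu 20.04.3 LTS"\n'
    return str2dict(sample, "=")
    # -> {'ID': 'ubuntu', 'ID_LIKE': 'debian', 'PRETTY_NAME': 'Ubuntu 20.04.3 LTS'}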
def linux_info():
# if un.sysname != 'Linux':
# return None
osr = read('/etc/os-release')
if osr is None:
return None
d = str2dict(osr, "=")
db = True if d["ID_LIKE"] == "debian" else False
info = LinuxInfo(
d["ID"],
d["PRETTY_NAME"],
db,
d["VERSION_ID"],
d["VERSION_CODENAME"]
)
return info
|
the-stack_106_30112 | import argparse
import logging
from typing import Text, Union, Optional
from rasa.shared.constants import (
DEFAULT_CONFIG_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_DATA_PATH,
DEFAULT_ENDPOINTS_PATH,
)
def add_model_param(
parser: argparse.ArgumentParser,
model_name: Text = "Rasa",
add_positional_arg: bool = True,
default: Optional[Text] = DEFAULT_MODELS_PATH,
) -> None:
help_text = (
"Path to a trained {} model. If a directory is specified, it will "
"use the latest model in this directory.".format(model_name)
)
parser.add_argument("-m", "--model", type=str, default=default, help=help_text)
if add_positional_arg:
parser.add_argument(
"model-as-positional-argument", nargs="?", type=str, help=help_text
)
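# Minimal composition sketch (illustrative only; the description string and the
# particular combination of helpers are assumptions, not an actual Rasa CLI).
# The add_* helpers in this module are meant to be stacked onto one parser.
def _example_build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(description="example parser")
    add_model_param(parser)
    add_stories_param(parser)
    add_domain_param(parser)
    add_logging_options(parser)
    return parser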
def add_stories_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
stories_name: Text = "training",
) -> None:
parser.add_argument(
"-s",
"--stories",
type=str,
default=DEFAULT_DATA_PATH,
help=f"File or folder containing your {stories_name} stories.",
)
def add_nlu_data_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
help_text: Text,
default: Optional[Text] = DEFAULT_DATA_PATH,
) -> None:
parser.add_argument("-u", "--nlu", type=str, default=default, help=help_text)
def add_domain_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
default: Optional[Text] = DEFAULT_DOMAIN_PATH,
) -> None:
parser.add_argument(
"-d",
"--domain",
type=str,
default=default,
help="Domain specification. This can be a single YAML file, or a directory "
"that contains several files with domain specifications in it. The content "
"of these files will be read and merged together.",
)
def add_config_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
default: Optional[Text] = DEFAULT_CONFIG_PATH,
) -> None:
parser.add_argument(
"-c",
"--config",
type=str,
default=default,
help="The policy and NLU pipeline configuration of your bot.",
)
def add_out_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
help_text: Text,
default: Optional[Text] = DEFAULT_MODELS_PATH,
required: bool = False,
) -> None:
parser.add_argument(
"--out",
type=str,
default=default,
help=help_text,
# The desired behaviour is that required indicates if this argument must
# have a value, but argparse interprets it as "must have a value
# from user input", so we toggle it only if our default is not set
required=required and default is None,
)
def add_endpoint_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
help_text: Text,
default: Optional[Text] = DEFAULT_ENDPOINTS_PATH,
) -> None:
"""Adds an option to an argument parser to configure endpoints path."""
parser.add_argument("--endpoints", type=str, default=default, help=help_text)
def add_data_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
default: Optional[Text] = DEFAULT_DATA_PATH,
required: bool = False,
data_type: Text = "Rasa ",
) -> None:
parser.add_argument(
"--data",
default=default,
nargs="+",
type=str,
help=f"Paths to the files or directories containing {data_type} data.",
# The desired behaviour is that required indicates if this argument must
# have a value, but argparse interprets it as "must have a value
# from user input", so we toggle it only if our default is not set
required=required and default is None,
)
def add_logging_options(parser: argparse.ArgumentParser) -> None:
"""Add options to an argument parser to configure logging levels."""
logging_arguments = parser.add_argument_group("Python Logging Options")
# arguments for logging configuration
logging_arguments.add_argument(
"-v",
"--verbose",
help="Be verbose. Sets logging level to INFO.",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
logging_arguments.add_argument(
"-vv",
"--debug",
help="Print lots of debugging statements. Sets logging level to DEBUG.",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
)
logging_arguments.add_argument(
"--quiet",
help="Be quiet! Sets logging level to WARNING.",
action="store_const",
dest="loglevel",
const=logging.WARNING,
)
|
the-stack_106_30113 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
densTheo = np.loadtxt("testDensity.dat")
potTheo = np.loadtxt("testPotential.dat")
potSolved = np.loadtxt("solvedPotential.dat")
#potTheo -= np.max(potTheo)
#potSolved -= np.min(potSolved)
dif = potTheo-potSolved
sumD = dif[potTheo!=0]/potTheo[potTheo!=0]/np.max(potTheo)
sumDT = sumD.sum()
plt.plot(potTheo)
plt.plot(potSolved)
plt.figure()
plt.plot(dif)
h = 2/16584
# second derivative of the theoretical potential via a central finite difference
f = potTheo
pasos2 = (f[2:-1] - 2*f[1:-2] + f[0:-3])/(h**2)
plt.figure()
plt.plot(densTheo)
plt.plot(pasos2)
plt.show()
|
the-stack_106_30115 | from datetime import datetime
from unittest import mock
from waffle.testutils import override_switch
from olympia import amo
from olympia.accounts.tasks import (
clear_sessions_event,
delete_user_event,
primary_email_change_event,
)
from olympia.accounts.tests.test_utils import totimestamp
from olympia.amo.tests import addon_factory, collection_factory, TestCase, user_factory
from olympia.bandwagon.models import Collection
from olympia.ratings.models import Rating
class TestPrimaryEmailChangeEvent(TestCase):
fxa_id = 'ABCDEF012345689'
def test_success(self):
user = user_factory(email='[email protected]', fxa_id=self.fxa_id)
primary_email_change_event(
self.fxa_id, totimestamp(datetime(2017, 10, 11)), '[email protected]'
)
user.reload()
assert user.email == '[email protected]'
assert user.email_changed == datetime(2017, 10, 11, 0, 0)
def test_ignored_because_old_timestamp(self):
user = user_factory(email='[email protected]', fxa_id=self.fxa_id)
yesterday = datetime(2017, 10, 1)
today = datetime(2017, 10, 2)
tomorrow = datetime(2017, 10, 3)
primary_email_change_event(self.fxa_id, totimestamp(today), '[email protected]')
assert user.reload().email == '[email protected]'
primary_email_change_event(
self.fxa_id, totimestamp(tomorrow), '[email protected]'
)
assert user.reload().email == '[email protected]'
primary_email_change_event(
self.fxa_id, totimestamp(yesterday), '[email protected]'
)
assert user.reload().email != '[email protected]'
assert user.reload().email == '[email protected]'
def test_ignored_if_user_not_found(self):
"""Check that this doesn't throw"""
primary_email_change_event(
self.fxa_id, totimestamp(datetime(2017, 10, 11)), '[email protected]'
)
class TestDeleteUserEvent(TestCase):
fxa_id = 'ABCDEF012345689'
def setUp(self):
self.user = user_factory(fxa_id=self.fxa_id)
def _fire_event(self):
delete_user_event(self.fxa_id, totimestamp(datetime(2017, 10, 11)))
self.user.reload()
assert self.user.email is not None
assert self.user.deleted
assert self.user.fxa_id is not None
@mock.patch('olympia.users.models.UserProfile.delete_picture')
@override_switch('fxa-account-delete', active=True)
def test_success_basic(self, delete_picture_mock):
collection = collection_factory(author=self.user)
another_addon = addon_factory()
Rating.objects.create(addon=another_addon, user=self.user, rating=5)
assert list(another_addon.ratings.all().values('rating', 'user')) == [
{'user': self.user.id, 'rating': 5}
]
self._fire_event()
assert not Collection.objects.filter(id=collection.id).exists()
assert not another_addon.ratings.all().exists()
delete_picture_mock.assert_called()
@override_switch('fxa-account-delete', active=True)
def test_success_addons(self):
addon = addon_factory(users=[self.user])
self._fire_event()
addon.reload()
assert addon.status == amo.STATUS_DELETED
@override_switch('fxa-account-delete', active=True)
def test_success_addons_other_owners(self):
other_owner = user_factory()
addon = addon_factory(users=[self.user, other_owner])
self._fire_event()
addon.reload()
assert addon.status != amo.STATUS_DELETED
assert list(addon.authors.all()) == [other_owner]
@override_switch('fxa-account-delete', active=False)
def test_waffle_off(self):
delete_user_event(self.fxa_id, totimestamp(datetime(2017, 10, 11)))
self.user.reload()
assert not self.user.deleted
class TestClearSessionsEvent(TestCase):
fxa_id = 'ABCDEF012345689'
def test_success(self):
user = user_factory(auth_id=123456, fxa_id=self.fxa_id)
assert user.auth_id is not None
clear_sessions_event(
self.fxa_id, totimestamp(datetime(2017, 10, 11)), 'passwordChanged'
)
assert user.reload().auth_id is None
def test_ignored_because_old_timestamp(self):
yesterday = datetime(2017, 10, 1)
today = datetime(2017, 10, 2)
tomorrow = datetime(2017, 10, 3)
user = user_factory(auth_id=123456, fxa_id=self.fxa_id, last_login=today)
clear_sessions_event(self.fxa_id, totimestamp(yesterday), 'passwordChanged')
assert user.reload().auth_id is not None
clear_sessions_event(self.fxa_id, totimestamp(tomorrow), 'passwordChanged')
assert user.reload().auth_id is None
def test_ignored_if_user_not_found(self):
"""Check that this doesn't throw"""
clear_sessions_event(
self.fxa_id, totimestamp(datetime(2017, 10, 11)), 'passwordChanged'
)
|
the-stack_106_30116 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib2
import mock
from novaclient import client as base_client
from novaclient import exceptions as nova_exceptions
from novaclient.v1_1 import client
from heat.openstack.common.py3kcompat import urlutils
from heat.tests import fakes
def fake_exception(status_code=404, message=None, details=None):
resp = mock.Mock()
resp.status_code = status_code
resp.headers = None
body = {'error': {'message': message, 'details': details}}
return nova_exceptions.from_response(resp, body, None)
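# Usage note (added for illustration; not exercised by the original fakes): a 404
# from fake_exception comes back as a novaclient NotFound instance, which tests
# can raise from stubbed client methods.
def _example_fake_not_found():
    return fake_exception(404, message='Instance could not be found')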
class FakeClient(fakes.FakeClient, client.Client):
def __init__(self, *args, **kwargs):
client.Client.__init__(self, 'username', 'password',
'project_id', 'auth_url')
self.client = FakeHTTPClient(**kwargs)
class FakeHTTPClient(base_client.HTTPClient):
def __init__(self, **kwargs):
self.username = 'username'
self.password = 'password'
self.auth_url = 'auth_url'
self.callstack = []
def _cs_request(self, url, method, **kwargs):
# Check that certain things are called correctly
if method in ['GET', 'DELETE']:
assert 'body' not in kwargs
elif method == 'PUT':
assert 'body' in kwargs
# Call the method
args = urlutils.parse_qsl(urlutils.urlparse(url)[4])
kwargs.update(args)
munged_url = url.rsplit('?', 1)[0]
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
munged_url = munged_url.replace('-', '_')
callback = "%s_%s" % (method.lower(), munged_url)
if not hasattr(self, callback):
raise AssertionError('Called unknown API method: %s %s, '
'expected fakes method name: %s' %
(method, url, callback))
# Note the call
self.callstack.append((method, url, kwargs.get('body')))
status, body = getattr(self, callback)(**kwargs)
if hasattr(status, 'items'):
return httplib2.Response(status), body
else:
return httplib2.Response({"status": status}), body
#
# Servers
#
def get_servers_detail(self, **kw):
return (
200,
{"servers": [{"id": 1234,
"name": "sample-server",
"OS-EXT-SRV-ATTR:instance_name":
"sample-server",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "e4d909c290d0fb1ca068ffaddf22cbd0",
"status": "BUILD",
"progress": 60,
"addresses": {"public": [{"version": 4,
"addr": "1.2.3.4"},
{"version": 4,
"addr": "5.6.7.8"}],
"private": [{"version": 4,
"addr": "10.11.12.13"}]},
"accessIPv4": "",
"accessIPv6": "",
"metadata": {"Server Label": "Web Head 1",
"Image Version": "2.1"}},
{"id": 5678,
"name": "sample-server2",
"OS-EXT-SRV-ATTR:instance_name":
"sample-server2",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "9e107d9d372bb6826bd81d3542a419d6",
"status": "ACTIVE",
"accessIPv4": "192.0.2.0",
"accessIPv6": "::babe:4317:0A83",
"addresses": {"public": [{"version": 4,
"addr": "4.5.6.7",
"OS-EXT-IPS-MAC:mac_addr":
"fa:16:3e:8c:22:aa"},
{"version": 4,
"addr": "5.6.9.8",
"OS-EXT-IPS-MAC:mac_addr":
"fa:16:3e:8c:33:bb"}],
"private": [{"version": 4,
"addr": "10.13.12.13",
"OS-EXT-IPS-MAC:mac_addr":
"fa:16:3e:8c:44:cc"}]},
"metadata": {}},
{"id": 9101,
"name": "hard-reboot",
"OS-EXT-SRV-ATTR:instance_name":
"hard-reboot",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "9e44d8d435c43dd8d96bb63ed995605f",
"status": "HARD_REBOOT",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {"public": [{"version": 4,
"addr": "172.17.1.2"},
{"version": 4,
"addr": "10.20.30.40"}],
"private": [{"version": 4,
"addr": "10.13.12.13"}]},
"metadata": {"Server Label": "DB 1"}},
{"id": 9102,
"name": "server-with-no-ip",
"OS-EXT-SRV-ATTR:instance_name":
"server-with-no-ip",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "c1365ba78c624df9b2ff446515a682f5",
"status": "ACTIVE",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {"empty_net": []},
"metadata": {"Server Label": "DB 1"}},
{"id": 9999,
"name": "sample-server3",
"OS-EXT-SRV-ATTR:instance_name":
"sample-server3",
"image": {"id": 3, "name": "sample image"},
"flavor": {"id": 3, "name": "m1.large"},
"hostId": "9e107d9d372bb6826bd81d3542a419d6",
"status": "ACTIVE",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {
"public": [{"version": 4, "addr": "4.5.6.7"},
{"version": 4, "addr": "5.6.9.8"}],
"private": [{"version": 4,
"addr": "10.13.12.13"}]},
"metadata": {"Server Label": "DB 1"}},
{"id": 56789,
"name": "server-with-metadata",
"OS-EXT-SRV-ATTR:instance_name":
"sample-server2",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "9e107d9d372bb6826bd81d3542a419d6",
"status": "ACTIVE",
"accessIPv4": "192.0.2.0",
"accessIPv6": "::babe:4317:0A83",
"addresses": {"public": [{"version": 4,
"addr": "4.5.6.7"},
{"version": 4,
"addr": "5.6.9.8"}],
"private": [{"version": 4,
"addr": "10.13.12.13"}]},
"metadata": {'test': '123', 'this': 'that'}}]})
def get_servers_1234(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
def get_servers_56789(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][5]}
return (200, r)
def get_servers_WikiServerOne(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
def get_servers_WikiServerOne1(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
def get_servers_WikiServerOne2(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][3]}
return (200, r)
def get_servers_5678(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][1]}
return (200, r)
def delete_servers_1234(self, **kw):
return (202, None)
def get_servers_9999(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
def get_servers_9102(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][3]}
return (200, r)
#
# Server actions
#
def post_servers_1234_action(self, body, **kw):
_body = None
resp = 202
assert len(body.keys()) == 1
action = body.keys()[0]
if action == 'reboot':
assert body[action].keys() == ['type']
assert body[action]['type'] in ['HARD', 'SOFT']
elif action == 'rebuild':
keys = body[action].keys()
if 'adminPass' in keys:
keys.remove('adminPass')
assert keys == ['imageRef']
_body = self.get_servers_1234()[1]
elif action == 'resize':
assert body[action].keys() == ['flavorRef']
elif action == 'confirmResize':
assert body[action] is None
# This one method returns a different response code
return (204, None)
elif action == 'revertResize':
assert body[action] is None
elif action == 'migrate':
assert body[action] is None
elif action == 'rescue':
assert body[action] is None
elif action == 'unrescue':
assert body[action] is None
elif action == 'lock':
assert body[action] is None
elif action == 'unlock':
assert body[action] is None
elif action == 'suspend':
assert body[action] is None
elif action == 'resume':
assert body[action] is None
elif action == 'addFixedIp':
assert body[action].keys() == ['networkId']
elif action == 'removeFixedIp':
assert body[action].keys() == ['address']
elif action == 'addFloatingIp':
assert body[action].keys() == ['address']
elif action == 'removeFloatingIp':
assert body[action].keys() == ['address']
elif action == 'createImage':
assert set(body[action].keys()) == set(['name', 'metadata'])
resp = dict(status=202, location="http://blah/images/456")
elif action == 'changePassword':
assert body[action].keys() == ['adminPass']
elif action == 'os-getConsoleOutput':
assert body[action].keys() == ['length']
return (202, {'output': 'foo'})
elif action == 'os-getVNCConsole':
assert body[action].keys() == ['type']
elif action == 'os-migrateLive':
assert set(body[action].keys()) == set(['host',
'block_migration',
'disk_over_commit'])
else:
raise AssertionError("Unexpected server action: %s" % action)
return (resp, _body)
#
# Flavors
#
def get_flavors_detail(self, **kw):
return (200, {'flavors': [
{'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10,
'OS-FLV-EXT-DATA:ephemeral': 10},
{'id': 2, 'name': 'm1.small', 'ram': 512, 'disk': 20,
'OS-FLV-EXT-DATA:ephemeral': 20},
{'id': 3, 'name': 'm1.large', 'ram': 512, 'disk': 20,
'OS-FLV-EXT-DATA:ephemeral': 30}
]})
#
# Floating ips
#
def get_os_floating_ips_1(self, **kw):
return (200, {'floating_ip': {'id': 1,
'fixed_ip': '10.0.0.1',
'ip': '11.0.0.1'}})
def post_os_floating_ips(self, body, **kw):
return (202, self.get_os_floating_ips_1()[1])
def delete_os_floating_ips_1(self, **kw):
return (204, None)
#
# Images
#
def get_images_detail(self, **kw):
return (200, {'images': [{'id': 1,
'name': 'CentOS 5.2',
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "ACTIVE",
"metadata": {"test_key": "test_value"},
"links": {}},
{"id": 743,
"name": "My Server Backup",
"serverId": 1234,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {}},
{"id": 744,
"name": "F17-x86_64-gold",
"serverId": 9999,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {}},
{"id": 745,
"name": "F17-x86_64-cfntools",
"serverId": 9998,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {}},
{"id": 746,
"name": "F20-x86_64-cfntools",
"serverId": 9998,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {}}]})
def get_images_1(self, **kw):
return (200, {'image': self.get_images_detail()[1]['images'][0]})
#
# Keypairs
#
def get_os_keypairs(self, *kw):
return (200, {"keypairs": [{'fingerprint': 'FAKE_KEYPAIR',
'name': 'test',
'public_key': 'foo'}]})
def get_os_availability_zone(self, *kw):
return (200, {"availabilityZoneInfo": [{'zoneName': 'nova1'}]})
def get_os_networks(self, **kw):
return (200, {'networks':
[{'label': 'public',
'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'},
{'label': 'foo',
'id': '42'},
{'label': 'foo',
'id': '42'}]})
#
# Limits
#
def get_limits(self, *kw):
return (200, {'limits': {'absolute': {'maxServerMeta': 3,
'maxPersonalitySize': 10240,
'maxPersonality': 5}}})
|
the-stack_106_30118 | from fetch_spacex import get_spacex_images_links, fetch_spacex_images
from fetch_hubble import get_hubble_collection_ids, get_hubble_image_links, fetch_hubble_image
from prepare_images import IMAGES_PATH, get_image, save_image, get_file_extension, resize_images
def main():
url = 'https://api.spacexdata.com/v3/launches/latest'
fetch_spacex_images(url,
get_spacex_images_links,
IMAGES_PATH,
get_image,
save_image)
collection_name = 'news'
hubble_image_ids = get_hubble_collection_ids(collection_name)
for image_id in hubble_image_ids:
fetch_hubble_image(image_id,
get_hubble_image_links,
get_image,
get_file_extension,
IMAGES_PATH,
save_image)
image_resolution = (1080, 1080)
resize_images(IMAGES_PATH, image_resolution)
if __name__ == '__main__':
main()
|
the-stack_106_30120 | """
Utilities for working with the local dataset cache.
"""
import os
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Tuple, Union, IO
from hashlib import sha256
import requests
import logging
CACHE_ROOT = Path(os.getenv("TAXONERD_CACHE", str(Path.home() / ".taxonerd")))
DATASET_CACHE = str(CACHE_ROOT / "datasets")
logger = logging.getLogger(__name__)
def cached_path(url_or_filename: Union[str, Path, Tuple], cache_dir: str = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = DATASET_CACHE
user_friendly_name = None
if type(url_or_filename) is tuple:
user_friendly_name = url_or_filename[1]
url_or_filename = url_or_filename[0]
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, user_friendly_name, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
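# Illustrative usage sketch (not part of the original module; both paths below
# are placeholder assumptions). A local path is returned unchanged if it exists,
# while an http(s) URL is downloaded once and cached under CACHE_ROOT/datasets.
def _example_cached_path():  # pragma: no cover
    local = cached_path("/tmp/some_local_file.txt")
    remote = cached_path("https://example.org/archive.tar.gz")
    return local, remote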
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
last_part = url.split("/")[-1]
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
filename += "." + last_part
return filename
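# Quick sketch (added for illustration; the URL and etag are assumptions): the
# cache filename is sha256(url), an optional sha256(etag), and the URL's final
# path component, joined with dots.
def _example_url_to_filename():  # pragma: no cover
    name = url_to_filename("https://example.org/model.tar.gz", etag='"abc123"')
    # -> "<sha256 of url>.<sha256 of etag>.model.tar.gz"
    return name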
def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = DATASET_CACHE
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
temp_file.write(chunk)
def get_from_cache(url: str, name: str, cache_dir: str = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = DATASET_CACHE
os.makedirs(cache_dir, exist_ok=True)
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError(
"HEAD request failed for url {} with status code {}".format(
url, response.status_code
)
)
etag = response.headers.get("ETag")
url_for_filename = url if not name else url + f"/{name}"
filename = url_to_filename(url_for_filename, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file: # type: IO
logger.info(
f"{url_for_filename} not found in cache, downloading to {temp_file.name}"
)
# GET file object
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info(
f"Finished download, copying {temp_file.name} to cache at {cache_path}"
)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
|
the-stack_106_30121 | ###
# Copyright (c) 2010, Daniel Folkinshteyn
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
import supybot.plugins as plugins
import supybot.commands as commands
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import re
import collections
try:
from supybot.i18n import PluginInternationalization
from supybot.i18n import internationalizeDocstring
_ = PluginInternationalization('Conditional')
except:
    # These are no-op fallbacks that allow the plugin to run on a bot
    # without the i18n plugin
_ = lambda x:x
internationalizeDocstring = lambda x:x
first = commands.first
many = commands.many
wrap = commands.wrap
getopts = commands.getopts
boolean_or_int = first('int', 'boolean')
class Conditional(callbacks.Plugin):
"""This plugin provides logic operators and other commands that
enable you to run commands only if a condition is true. Useful for nested
commands and scripting."""
threaded = True
def __init__(self, irc):
callbacks.Plugin.__init__(self, irc)
def _runCommandFunction(self, irc, msg, command):
"""Run a command from message, as if command was sent over IRC."""
tokens = callbacks.tokenize(command)
try:
self.Proxy(irc.irc, msg, tokens)
except Exception as e:
self.log.exception('Uncaught exception in requested function:')
@internationalizeDocstring
def cif(self, irc, msg, args, condition, ifcommand, elsecommand):
"""<condition> <ifcommand> <elsecommand>
Runs <ifcommand> if <condition> evaluates to true, runs <elsecommand>
if it evaluates to false.
Use other logical operators defined in this plugin and command nesting
to your advantage here.
"""
if condition:
self._runCommandFunction(irc, msg, ifcommand)
else:
self._runCommandFunction(irc, msg, elsecommand)
irc.noReply()
cif = wrap(cif, [boolean_or_int, 'something', 'something'])
@internationalizeDocstring
def cand(self, irc, msg, args, conds):
"""<cond1> [<cond2> ... <condN>]
Returns true if all conditions supplied evaluate to true.
"""
if all(conds):
irc.reply("true")
else:
irc.reply("false")
cand = wrap(cand, [many(boolean_or_int),])
@internationalizeDocstring
def cor(self, irc, msg, args, conds):
"""<cond1> [<cond2> ... <condN>]
Returns true if any one of conditions supplied evaluates to true.
"""
if any(conds):
irc.reply("true")
else:
irc.reply("false")
cor = wrap(cor, [many(boolean_or_int),])
@internationalizeDocstring
def cxor(self, irc, msg, args, conds):
"""<cond1> [<cond2> ... <condN>]
Returns true if only one of conditions supplied evaluates to true.
"""
if sum(conds) == 1:
irc.reply("true")
else:
irc.reply("false")
cxor = wrap(cxor, [many(boolean_or_int),])
@internationalizeDocstring
def ceq(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a string comparison on <item1> and <item2>.
Returns true if they are equal.
"""
if item1 == item2:
irc.reply('true')
else:
irc.reply('false')
ceq = wrap(ceq, ['anything', 'anything'])
@internationalizeDocstring
def ne(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a string comparison on <item1> and <item2>.
Returns true if they are not equal.
"""
if item1 != item2:
irc.reply('true')
else:
irc.reply('false')
ne = wrap(ne, ['anything', 'anything'])
@internationalizeDocstring
def gt(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a string comparison on <item1> and <item2>.
Returns true if <item1> is greater than <item2>.
"""
if item1 > item2:
irc.reply('true')
else:
irc.reply('false')
gt = wrap(gt, ['anything', 'anything'])
@internationalizeDocstring
def ge(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a string comparison on <item1> and <item2>.
Returns true if <item1> is greater than or equal to <item2>.
"""
if item1 >= item2:
irc.reply('true')
else:
irc.reply('false')
ge = wrap(ge, ['anything', 'anything'])
@internationalizeDocstring
def lt(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a string comparison on <item1> and <item2>.
Returns true if <item1> is less than <item2>.
"""
if item1 < item2:
irc.reply('true')
else:
irc.reply('false')
lt = wrap(lt, ['anything', 'anything'])
@internationalizeDocstring
def le(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a string comparison on <item1> and <item2>.
Returns true if <item1> is less than or equal to <item2>.
"""
if item1 <= item2:
irc.reply('true')
else:
irc.reply('false')
le = wrap(le, ['anything', 'anything'])
@internationalizeDocstring
def match(self, irc, msg, args, optlist, item1, item2):
"""[--case-insensitive] <item1> <item2>
Determines if <item1> is a substring of <item2>.
Returns true if <item1> is contained in <item2>.
Will only match case if --case-insensitive is not given.
"""
optlist = dict(optlist)
if 'case-insensitive' in optlist:
item1 = item1.lower()
item2 = item2.lower()
if item2.find(item1) != -1:
irc.reply('true')
else:
irc.reply('false')
match = wrap(match, [getopts({'case-insensitive': ''}),
'something', 'something'])
@internationalizeDocstring
def nceq(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a numeric comparison on <item1> and <item2>.
Returns true if they are equal.
"""
if item1 == item2:
irc.reply('true')
else:
irc.reply('false')
nceq = wrap(nceq, ['float', 'float'])
@internationalizeDocstring
def nne(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a numeric comparison on <item1> and <item2>.
Returns true if they are not equal.
"""
if item1 != item2:
irc.reply('true')
else:
irc.reply('false')
nne = wrap(nne, ['float', 'float'])
@internationalizeDocstring
def ngt(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a numeric comparison on <item1> and <item2>.
Returns true if <item1> is greater than <item2>.
"""
if item1 > item2:
irc.reply('true')
else:
irc.reply('false')
ngt = wrap(ngt, ['float', 'float'])
@internationalizeDocstring
def nge(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a numeric comparison on <item1> and <item2>.
Returns true if <item1> is greater than or equal to <item2>.
"""
if item1 >= item2:
irc.reply('true')
else:
irc.reply('false')
nge = wrap(nge, ['float', 'float'])
@internationalizeDocstring
def nlt(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a numeric comparison on <item1> and <item2>.
Returns true if <item1> is less than <item2>.
"""
if item1 < item2:
irc.reply('true')
else:
irc.reply('false')
nlt = wrap(nlt, ['float', 'float'])
@internationalizeDocstring
def nle(self, irc, msg, args, item1, item2):
"""<item1> <item2>
Does a numeric comparison on <item1> and <item2>.
Returns true if <item1> is less than or equal to <item2>.
"""
if item1 <= item2:
irc.reply('true')
else:
irc.reply('false')
nle = wrap(nle, ['float', 'float'])
def cerror(self, irc, msg, args, testcommand):
"""<testcommand>
Runs <testcommand> and returns true if it raises an error;
false otherwise.
"""
tokens = callbacks.tokenize(testcommand)
InvalidCommand = collections.namedtuple('InvalidCommand',
'command')
replies = []
errors = []
class ErrorReportingProxy(self.Proxy):
def reply(self2, s, *args, **kwargs):
replies.append(s)
def error(self2, s, Raise=False, *args, **kwargs):
errors.append(s)
if Raise:
raise ArgumentError
def _callInvalidCommands(self2):
errors.append(InvalidCommand(self2.args))
def evalArgs(self2):
# We don't want the replies in the nested command to
# be stored here.
super(ErrorReportingProxy, self2).evalArgs(withClass=self.Proxy)
try:
ErrorReportingProxy(irc.irc, msg, tokens)
        except callbacks.ArgumentError:
pass
# TODO: do something with the results
if errors:
irc.reply('true')
else:
irc.reply('false')
cerror = wrap(cerror, ['something'])
Condition = internationalizeDocstring(Conditional)
Class = Conditional
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
the-stack_106_30124 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import distutils
from distutils import util
import demo_runner as dr
import numpy as np
parser = argparse.ArgumentParser(
description="Running AB test on simulator", add_help=True
)
parser.add_argument("--scene", type=str, default=dr.default_sim_settings["test_scene"])
parser.add_argument(
"--max_frames",
type=int,
default=2000,
help="Max number of frames simulated."
"Default or larger value is suggested for accurate results.",
)
parser.add_argument(
"--resolution",
type=int,
nargs="+",
default=[128, 256, 512],
help="Resolution r for frame (r x r).",
)
parser.add_argument(
"--num_procs",
type=int,
nargs="+",
default=[1, 3, 5],
help="Number of concurrent processes.",
)
parser.add_argument(
"--semantic_sensor",
action="store_true",
help="Whether to enable semantic sensor in the test.",
)
parser.add_argument("--seed", type=int, default=1)
parser.add_argument(
"--enable_physics",
action="store_true",
help="Whether to enable phyiscs (kinematic by default or dynamics if installed with bullet) during the test or not.",
)
parser.add_argument(
"--num_objects",
type=int,
default=10,
help="Number of objects to spawn if enable_physics is true.",
)
parser.add_argument(
"--test_object_index",
type=int,
default=0,
help="Index the objects to spawn if enable_physics is true. -1 indicates random.",
)
parser.add_argument(
"--feature",
type=str,
required=True,
help="the feature that is to be tested. (it must be defined in default_sim_settings",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-i", "--integer", action="store_true", help="the feature type is integer."
)
group.add_argument(
"-f", "--float", action="store_true", help="the feature type is float."
)
group.add_argument(
"-b", "--boolean", action="store_true", help="the feature type is boolean."
)
group.add_argument(
"-s", "--string", action="store_true", help="the feature type is string."
)
parser.add_argument(
"--control_value",
type=str,
default=argparse.SUPPRESS,
help="the feature value in control group (default: default value in default_settings)",
)
parser.add_argument(
"--test_value", type=str, required=True, help="the feature value in test group."
)
parser.add_argument("--csv", type=str, help="csv output file")
parser.add_argument(
"--speedup", action="store_true", help="display speedup instead of percent change"
)
def get_percent_diff_str(test_val, control_val):
return f"{((test_val - control_val) / control_val) * 100.0:.1f}%"
def get_speedup_str(test_val, control_val):
return f"{test_val / control_val:.1f}x"
def seconds_to_ms(seconds):
return seconds * 1000.0
def print_metric(
    performance_all,
resolutions,
title_list,
metric="fps",
comparison_label_generator=None,
metric_transformer=None,
):
for nproc, performance in performance_all.items():
header = f" Performance ({metric}) NPROC={nproc} "
print(f"{header:=^100}")
title = "Resolution".ljust(16)
title += "".join(t.ljust(24) for t in title_list)
print(title)
# break down by resolutions
for resolution, perf in zip(resolutions, performance):
row = f"{resolution} x {resolution}".ljust(16)
# break down by benchmark items
for t in title_list:
control_metric = perf[t][dr.ABTestGroup.CONTROL][metric]
test_metric = perf[t][dr.ABTestGroup.TEST][metric]
                comparison_str = (
                    comparison_label_generator(test_metric, control_metric)
                    if comparison_label_generator
                    else ""
                )
if metric_transformer:
control_metric = metric_transformer(control_metric)
test_metric = metric_transformer(test_metric)
row += f"{control_metric:6.1f}/{test_metric:6.1f} ({comparison_str:>6})"
print(row)
print(f"{' END ':=^100}")
def get_csv_data(
performance_all, resolutions, title_list, metrics=["fps"], metric_transformer={}
):
fields = ["num_procs", "resolution", "sensor_types"]
for metric in metrics:
fields.append(f"{metric}_control")
fields.append(f"{metric}_test")
rows = []
for nproc, performance in performance_all.items():
for resolution, perf in zip(resolutions, performance):
for t in title_list:
control_perf = perf[t][dr.ABTestGroup.CONTROL]
test_perf = perf[t][dr.ABTestGroup.TEST]
row = dict(num_procs=nproc, resolution=resolution, sensor_types=t)
for metric in metrics:
control_metric = control_perf[metric]
test_metric = test_perf[metric]
if metric_transformer and metric in metric_transformer:
control_metric = metric_transformer[metric](control_metric)
test_metric = metric_transformer[metric](test_metric)
row[f"{metric}_test"] = test_metric
row[f"{metric}_control"] = control_metric
rows.append(row)
return rows, fields
args = parser.parse_args()
control_val = None
if args.integer:
test_val = int(args.test_value)
if "control_value" in args:
control_val = int(args.control_value)
elif args.float:
test_val = float(args.test_value)
if "control_value" in args:
control_val = float(args.control_value)
elif args.boolean:
test_val = distutils.util.strtobool(args.test_value)
if "control_value" in args:
control_val = distutils.util.strtobool(args.control_value)
elif args.string:
test_val = args.test_value
if "control_value" in args:
control_val = args.control_value
default_settings = dr.default_sim_settings.copy()
if not (args.feature in default_settings.keys()):
raise RuntimeError("Feature to be tested is not defined in default_sim_settings.")
if args.feature == "max_frames":
raise RuntimeError("Feature cannot be the max_frames.")
default_settings["scene"] = args.scene
default_settings["silent"] = True
default_settings["seed"] = args.seed
default_settings["save_png"] = False
default_settings["print_semantic_scene"] = False
default_settings["print_semantic_mask_stats"] = False
default_settings["compute_shortest_path"] = False
default_settings["compute_action_shortest_path"] = False
default_settings["max_frames"] = args.max_frames
# set the control value into the default setting
if control_val is not None:
default_settings[args.feature] = control_val
else:
control_val = default_settings[args.feature]
print(
f"==== feature {args.feature}, control value: {control_val}, test value: {test_val} ===="
)
benchmark_items = {
"rgb": {},
"rgbd": {"depth_sensor": True},
"depth_only": {"color_sensor": False, "depth_sensor": True},
}
if args.semantic_sensor:
benchmark_items["semantic_only"] = {"color_sensor": False, "semantic_sensor": True}
benchmark_items["rgbd_semantic"] = {"depth_sensor": True, "semantic_sensor": True}
if args.enable_physics:
# TODO: cannot benchmark physics with no sensors as this won't create a renderer or load the scene.
# benchmark_items["enable_physics_no_obs"] = {"color_sensor": False, "enable_physics": True}
benchmark_items["phys_rgb"] = {"enable_physics": True}
benchmark_items["phys_rgbd"] = {"depth_sensor": True, "enable_physics": True}
default_settings["num_objects"] = args.num_objects
default_settings["test_object_index"] = args.test_object_index
resolutions = args.resolution
nprocs_tests = args.num_procs
performance_all = {}
title_list = []
collect_title_list = True
for nprocs in nprocs_tests:
default_settings["num_processes"] = nprocs
performance = []
for resolution in resolutions:
default_settings["width"] = default_settings["height"] = resolution
resolution_label = f"{resolution} x {resolution}"
per_resolution_perf = {}
for key, value in benchmark_items.items():
per_group_perf = {}
for g in dr.ABTestGroup:
demo_runner = dr.DemoRunner(default_settings, dr.DemoRunnerType.AB_TEST)
run_label = f"(nprocs={nprocs}, resolution={resolution_label}, sensors={key}, group={g.name})"
print(f"{f' Starting run {run_label} ':-^100}")
settings = default_settings.copy()
settings.update(value)
# set new value before the test group run
if g == dr.ABTestGroup.TEST:
settings[args.feature] = test_val
per_group_perf[g] = demo_runner.benchmark(settings, g)
result = f" FPS {run_label}: {per_group_perf[g]['fps']:.1f} "
print(f"{result:-^100}")
if collect_title_list:
title_list.append(key)
per_resolution_perf[key] = per_group_perf
collect_title_list = False
performance.append(per_resolution_perf)
performance_all[nprocs] = performance
comparison_label_generator = get_speedup_str if args.speedup else get_percent_diff_str
print_metric(
performance_all,
resolutions,
title_list,
metric="fps",
comparison_label_generator=comparison_label_generator,
)
print_metric(
performance_all,
resolutions,
title_list,
metric="frame_time",
metric_transformer=seconds_to_ms,
comparison_label_generator=comparison_label_generator,
)
if args.enable_physics:
print_metric(performance_all, resolutions, title_list, metric="avg_sim_step_time")
if args.csv:
with open(args.csv, "w", newline="") as csv_file:
print(f"Writing csv results to {args.csv}")
metrics = ["fps"]
if args.enable_physics:
metrics.append("avg_sim_step_time")
rows, fields = get_csv_data(
performance_all,
resolutions,
title_list,
metrics=metrics,
metric_transformer={"avg_sim_step_time": seconds_to_ms},
)
writer = csv.DictWriter(csv_file, fieldnames=fields)
writer.writeheader()
writer.writerows(rows)
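# Illustrative invocation (a sketch, not taken from the original repo): the feature
# name below is an assumption and must be a key in dr.default_sim_settings, e.g. a
# boolean rendering toggle. Exactly one of -i/-f/-b/-s is required to declare the
# feature's type, and --test_value is interpreted according to that type.
#
#   python this_ab_benchmark_script.py --scene /path/to/scene.glb \
#       --feature frustum_culling --boolean --test_value True \
#       --num_procs 1 --resolution 256 --max_frames 2000 --csv ab_results.csv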
|
the-stack_106_30128 | #!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python utility to run opt and counters benchmarks and save json output """
import argparse
import itertools
import multiprocessing
import os
import random
import subprocess
import sys
import bm_constants
sys.path.append(
    os.path.join(os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
                 'python_utils'))
import jobset
def _args():
argp = argparse.ArgumentParser(description='Runs microbenchmarks')
argp.add_argument('-b',
'--benchmarks',
nargs='+',
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
help='Benchmarks to run')
argp.add_argument('-j',
'--jobs',
type=int,
default=multiprocessing.cpu_count(),
help='Number of CPUs to use')
argp.add_argument(
'-n',
'--name',
type=str,
help=
'Unique name of the build to run. Needs to match the handle passed to bm_build.py'
)
argp.add_argument('-r',
'--regex',
type=str,
default="",
help='Regex to filter benchmarks run')
argp.add_argument(
'-l',
'--loops',
type=int,
default=20,
help=
'Number of times to loops the benchmarks. More loops cuts down on noise'
)
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
args = argp.parse_args()
assert args.name
if args.loops < 3:
print("WARNING: This run will likely be noisy. Increase loops to at "
"least 3.")
return args
def _collect_bm_data(bm, cfg, name, regex, idx, loops):
jobs_list = []
for line in subprocess.check_output([
'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_list_tests',
'--benchmark_filter=%s' % regex
]).splitlines():
line = line.decode('UTF-8')
stripped_line = line.strip().replace("/",
"_").replace("<", "_").replace(
">", "_").replace(", ", "_")
cmd = [
'bm_diff_%s/%s/%s' % (name, cfg, bm),
'--benchmark_filter=^%s$' % line,
'--benchmark_out=%s.%s.%s.%s.%d.json' %
(bm, stripped_line, cfg, name, idx),
'--benchmark_out_format=json',
]
jobs_list.append(
jobset.JobSpec(cmd,
shortname='%s %s %s %s %d/%d' %
(bm, line, cfg, name, idx + 1, loops),
verbose_success=True,
cpu_cost=2,
timeout_seconds=60 * 60)) # one hour
return jobs_list
def create_jobs(name, benchmarks, loops, regex, counters):
jobs_list = []
for loop in range(0, loops):
for bm in benchmarks:
jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
if counters:
jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
loops)
random.shuffle(jobs_list, random.SystemRandom().random)
return jobs_list
if __name__ == '__main__':
args = _args()
jobs_list = create_jobs(args.name, args.benchmarks, args.loops, args.regex,
args.counters)
jobset.run(jobs_list, maxjobs=args.jobs)
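# Illustrative invocation (a sketch; the build handle and benchmark name are
# assumptions -- the handle must match one previously produced by bm_build.py, and
# the benchmark must appear in bm_constants._AVAILABLE_BENCHMARK_TESTS):
#
#   python bm_run.py -n candidate_build -b bm_error -l 3 --no-counters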
|
the-stack_106_30129 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_age_limit,
parse_iso8601,
xpath_text,
)
class VideomoreIE(InfoExtractor):
IE_NAME = 'videomore'
_VALID_URL = r'videomore:(?P<sid>\d+)$|https?://videomore\.ru/(?:(?:embed|[^/]+/[^/]+)/|[^/]+\?.*\btrack_id=)(?P<id>\d+)(?:[/?#&]|\.(?:xml|json)|$)'
_TESTS = [{
'url': 'http://videomore.ru/kino_v_detalayah/5_sezon/367617',
'md5': '70875fbf57a1cd004709920381587185',
'info_dict': {
'id': '367617',
'ext': 'flv',
'title': 'В гостях Алексей Чумаков и Юлия Ковальчук',
'description': 'В гостях – лучшие романтические комедии года, «Выживший» Иньярриту и «Стив Джобс» Дэнни Бойла.',
'series': 'Кино в деталях',
'episode': 'В гостях Алексей Чумаков и Юлия Ковальчук',
'episode_number': None,
'season': 'Сезон 2015',
'season_number': 5,
'thumbnail': 're:^https?://.*\.jpg',
'duration': 2910,
'age_limit': 16,
'view_count': int,
},
}, {
'url': 'http://videomore.ru/embed/259974',
'info_dict': {
'id': '259974',
'ext': 'flv',
'title': '80 серия',
'description': '«Медведей» ждет решающий матч. Макеев выясняет отношения со Стрельцовым. Парни узнают подробности прошлого Макеева.',
'series': 'Молодежка',
'episode': '80 серия',
'episode_number': 40,
'season': '2 сезон',
'season_number': 2,
'thumbnail': 're:^https?://.*\.jpg',
'duration': 2809,
'age_limit': 16,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://videomore.ru/molodezhka/sezon_promo/341073',
'info_dict': {
'id': '341073',
'ext': 'flv',
'title': 'Команда проиграла из-за Бакина?',
'description': 'Молодежка 3 сезон скоро',
'series': 'Молодежка',
'episode': 'Команда проиграла из-за Бакина?',
'episode_number': None,
'season': 'Промо',
'season_number': 99,
'thumbnail': 're:^https?://.*\.jpg',
'duration': 29,
'age_limit': 16,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://videomore.ru/elki_3?track_id=364623',
'only_matching': True,
}, {
'url': 'http://videomore.ru/embed/364623',
'only_matching': True,
}, {
'url': 'http://videomore.ru/video/tracks/364623.xml',
'only_matching': True,
}, {
'url': 'http://videomore.ru/video/tracks/364623.json',
'only_matching': True,
}, {
'url': 'http://videomore.ru/video/tracks/158031/quotes/33248',
'only_matching': True,
}, {
'url': 'videomore:367617',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<object[^>]+data=(["\'])https?://videomore.ru/player\.swf\?.*config=(?P<url>https?://videomore\.ru/(?:[^/]+/)+\d+\.xml).*\1',
webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('sid') or mobj.group('id')
video = self._download_xml(
'http://videomore.ru/video/tracks/%s.xml' % video_id,
video_id, 'Downloading video XML')
video_url = xpath_text(video, './/video_url', 'video url', fatal=True)
formats = self._extract_f4m_formats(video_url, video_id, f4m_id='hds')
self._sort_formats(formats)
data = self._download_json(
'http://videomore.ru/video/tracks/%s.json' % video_id,
video_id, 'Downloading video JSON')
title = data.get('title') or data['project_title']
description = data.get('description') or data.get('description_raw')
timestamp = parse_iso8601(data.get('published_at'))
duration = int_or_none(data.get('duration'))
view_count = int_or_none(data.get('views'))
age_limit = parse_age_limit(data.get('min_age'))
thumbnails = [{
'url': thumbnail,
} for thumbnail in data.get('big_thumbnail_urls', [])]
series = data.get('project_title')
episode = data.get('title')
episode_number = int_or_none(data.get('episode_of_season') or None)
season = data.get('season_title')
season_number = int_or_none(data.get('season_pos') or None)
return {
'id': video_id,
'title': title,
'description': description,
'series': series,
'episode': episode,
'episode_number': episode_number,
'season': season,
'season_number': season_number,
'thumbnails': thumbnails,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'age_limit': age_limit,
'formats': formats,
}
class VideomoreVideoIE(InfoExtractor):
IE_NAME = 'videomore:video'
_VALID_URL = r'https?://videomore\.ru/(?:(?:[^/]+/){2})?(?P<id>[^/?#&]+)[/?#&]*$'
_TESTS = [{
# single video with og:video:iframe
'url': 'http://videomore.ru/elki_3',
'info_dict': {
'id': '364623',
'ext': 'flv',
'title': 'Ёлки 3',
'description': '',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 5579,
'age_limit': 6,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
# season single series with og:video:iframe
'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya',
'only_matching': True,
}, {
'url': 'http://videomore.ru/sejchas_v_seti/serii_221-240/226_vypusk',
'only_matching': True,
}, {
# single video without og:video:iframe
'url': 'http://videomore.ru/marin_i_ego_druzya',
'info_dict': {
'id': '359073',
'ext': 'flv',
'title': '1 серия. Здравствуй, Аквавилль!',
'description': 'md5:c6003179538b5d353e7bcd5b1372b2d7',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 754,
'age_limit': 6,
'view_count': int,
},
'params': {
'skip_download': True,
},
}]
@classmethod
def suitable(cls, url):
return False if VideomoreIE.suitable(url) else super(VideomoreVideoIE, cls).suitable(url)
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_url = self._og_search_property(
'video:iframe', webpage, 'video url', default=None)
if not video_url:
video_id = self._search_regex(
(r'config\s*:\s*["\']https?://videomore\.ru/video/tracks/(\d+)\.xml',
r'track-id=["\'](\d+)',
r'xcnt_product_id\s*=\s*(\d+)'), webpage, 'video id')
video_url = 'videomore:%s' % video_id
return self.url_result(video_url, VideomoreIE.ie_key())
class VideomoreSeasonIE(InfoExtractor):
IE_NAME = 'videomore:season'
_VALID_URL = r'https?://videomore\.ru/(?!embed)(?P<id>[^/]+/[^/?#&]+)[/?#&]*$'
_TESTS = [{
'url': 'http://videomore.ru/molodezhka/sezon_promo',
'info_dict': {
'id': 'molodezhka/sezon_promo',
'title': 'Молодежка Промо',
},
'playlist_mincount': 12,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
title = self._og_search_title(webpage)
entries = [
self.url_result(item) for item in re.findall(
r'<a[^>]+href="((?:https?:)?//videomore\.ru/%s/[^/]+)"[^>]+class="widget-item-desc"'
% display_id, webpage)]
return self.playlist_result(entries, display_id, title)
|
the-stack_106_30132 | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.notebook import Notebook
from ..model.onenote_section import OnenoteSection
from ..one_drive_object_base import OneDriveObjectBase
class SectionGroup(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def sections_url(self):
"""
Gets and sets the sectionsUrl
Returns:
str:
The sectionsUrl
"""
if "sectionsUrl" in self._prop_dict:
return self._prop_dict["sectionsUrl"]
else:
return None
@sections_url.setter
def sections_url(self, val):
self._prop_dict["sectionsUrl"] = val
@property
def section_groups_url(self):
"""
Gets and sets the sectionGroupsUrl
Returns:
str:
The sectionGroupsUrl
"""
if "sectionGroupsUrl" in self._prop_dict:
return self._prop_dict["sectionGroupsUrl"]
else:
return None
@section_groups_url.setter
def section_groups_url(self, val):
self._prop_dict["sectionGroupsUrl"] = val
@property
def parent_notebook(self):
"""
Gets and sets the parentNotebook
Returns:
:class:`Notebook<onedrivesdk.model.notebook.Notebook>`:
The parentNotebook
"""
if "parentNotebook" in self._prop_dict:
if isinstance(self._prop_dict["parentNotebook"], OneDriveObjectBase):
return self._prop_dict["parentNotebook"]
            else:
self._prop_dict["parentNotebook"] = Notebook(self._prop_dict["parentNotebook"])
return self._prop_dict["parentNotebook"]
return None
@parent_notebook.setter
def parent_notebook(self, val):
self._prop_dict["parentNotebook"] = val
@property
def parent_section_group(self):
"""
Gets and sets the parentSectionGroup
Returns:
:class:`SectionGroup<onedrivesdk.model.section_group.SectionGroup>`:
The parentSectionGroup
"""
if "parentSectionGroup" in self._prop_dict:
if isinstance(self._prop_dict["parentSectionGroup"], OneDriveObjectBase):
return self._prop_dict["parentSectionGroup"]
            else:
self._prop_dict["parentSectionGroup"] = SectionGroup(self._prop_dict["parentSectionGroup"])
return self._prop_dict["parentSectionGroup"]
return None
@parent_section_group.setter
def parent_section_group(self, val):
self._prop_dict["parentSectionGroup"] = val
@property
def sections(self):
"""Gets and sets the sections
Returns:
:class:`SectionsCollectionPage<onedrivesdk.request.sections_collection.SectionsCollectionPage>`:
The sections
"""
if "sections" in self._prop_dict:
return SectionsCollectionPage(self._prop_dict["sections"])
else:
return None
@property
def section_groups(self):
"""Gets and sets the sectionGroups
Returns:
:class:`SectionGroupsCollectionPage<onedrivesdk.request.section_groups_collection.SectionGroupsCollectionPage>`:
The sectionGroups
"""
if "sectionGroups" in self._prop_dict:
return SectionGroupsCollectionPage(self._prop_dict["sectionGroups"])
else:
return None
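# Illustrative usage sketch (not part of the generated SDK; the property names in
# the nested dict are assumptions): the model wraps a plain property dict and
# converts navigation properties to model objects on first access.
#
#   sg = SectionGroup({"sectionsUrl": "https://example.invalid/sections",
#                      "parentNotebook": {"displayName": "Notes"}})
#   sg.sections_url      # -> "https://example.invalid/sections"
#   sg.parent_notebook   # -> Notebook instance wrapping the nested dict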
|
the-stack_106_30133 | from run_cm2_10 import run_test
TEST_DIR = "./supplementary/Real_Life_Example_Lycopene_Operon/"
def test_cm2_lycopene_10():
"""
Test CM_2
Lycopene Sanger
10 targets
Threshold: 0.7
"""
test_params = {
"name": "CM_2: Lycopene - 10 targets - Sanger - Threshold: 0.y",
"id": "cm2-lycopene-10targets-sanger-th07",
"targets": {
"file": f"{TEST_DIR}/targets/target_lycopene_sanger_10.json",
},
"template": f"{TEST_DIR}/templates/template_lycopene_sanger.json",
"nbloop": 1,
"threshold": 0.7,
}
run_test(test_params)
def main():
"""
Main
"""
test_cm2_lycopene_10()
if __name__ == "__main__":
main()
|
the-stack_106_30136 | import traceback
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
try:
method = getattr(self, self._task.args['method'])
args = tuple(self._task.args.get('args', ()))
kwargs = self._task.args.get('kwargs', {})
return {
'changed': False,
'failed': False,
'result': method(*args, **kwargs)
}
except Exception as e:
traceback.print_exc()
return {
'changed': False,
'failed': True,
'msg': str(e),
'result': e,
}
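# Illustrative playbook usage (a sketch; the task keyword under which this plugin
# is exposed depends on the file name it is installed as in action_plugins/, and
# the method shown is hypothetical -- 'method' is looked up on the ActionModule
# via getattr and called with 'args' and 'kwargs'):
#
#   - name: call a method on the action plugin via reflection
#     call_method:
#       method: some_method_name
#       args: [1, 2]
#       kwargs: {verbose: true}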
|
the-stack_106_30138 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def clear_status(apps, schema_editor):
m = apps.get_model('core.Declaration')
m.objects.update(confirmed="p")
class Migration(migrations.Migration):
dependencies = [
('core', '0063_auto_20151226_2241'),
]
operations = [
migrations.RunPython(clear_status),
]
|
the-stack_106_30140 | #!/usr/bin/env python
from kapteyn import wcsgrat, maputils
from matplotlib.pyplot import figure, show
fig = figure()
myCubes = maputils.Cubes(fig, toolbarinfo=True, printload=False,
helptext=False, imageinfo=True)
# Create a maputils FITS object from a FITS file on disk
fitsobject = maputils.FITSimage('ngc6946.fits')
naxis3 = fitsobject.hdr['NAXIS3']
# Note that slice positions follow FITS syntax, i.e. start at 1
slicepos = list(range(1,naxis3+1))
frame = fig.add_subplot(1,1,1)
vmin, vmax = fitsobject.get_dataminmax()
cube = myCubes.append(frame, fitsobject, (1,2), slicepos,
vmin=vmin, vmax=vmax, hasgraticule=True)
show()
|
the-stack_106_30141 | # --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from .util import *
import time
import torch
import IPython
from layers.sdf_matching_loss import SDFLoss
class Cost(object):
"""
Cost class that computes obstacle, grasp, and smoothness and their gradients.
"""
def __init__(self, env):
self.env = env
self.cfg = env.config
self.sdf_loss = SDFLoss()
if len(self.env.objects) > 0:
self.target_obj = self.env.objects[self.env.target_idx]
def functional_grad(self, v, a, JT, ws_cost, ws_grad):
"""
Compute functional gradient based on workspace cost.
"""
p = v.shape[-2]
vel_norm = np.linalg.norm(v, axis=-1, keepdims=True) # n x p x 1
cost = np.sum(ws_cost * vel_norm[..., 0], axis=-1) # n
normalized_vel = safe_div(v, vel_norm) # p x 3
proj_mat = np.eye(3) - np.matmul(
normalized_vel[..., None], normalized_vel[..., None, :]
) # p x 3 x 3
scaled_curvature = ws_cost[..., None, None] * safe_div(
np.matmul(proj_mat, a[..., None]), vel_norm[..., None] ** 2
) # p x 3 x 1
projected_grad = np.matmul(proj_mat, ws_grad[..., None]) # p x 3 x 1
grad = np.sum(
np.matmul(JT, (vel_norm[..., None] * projected_grad - scaled_curvature)),
axis=-1,
)
return cost, grad
def forward_poses(self, joints):
"""
Compute forward kinematics for link poses and joint origins and axis
"""
robot = self.env.robot
(
robot_poses,
joint_origins,
joint_axis,
) = robot.robot_kinematics.forward_kinematics_parallel(
joints[None, ...], base_link=self.cfg.base_link, return_joint_info=True
)
return robot_poses[0], joint_origins[0], joint_axis[0]
def forward_points(self, pose, pts, normals=None):
"""
Map the points through forward kinematics.
"""
r = pose[..., :3, :3]
t = pose[..., :3, [3]]
x = np.matmul(r, pts[None, ...]) + t
if normals is None:
return x.transpose([3, 1, 0, 2]) # p x n x m x 3
else:
normal = np.matmul(r, normals[None, ...])
x = np.concatenate([x, normal], 2)
return x.transpose([3, 1, 0, 2])
def color_point(self, vis_pts, collide):
"""
Color the sampled points based on relative sdf value.
Green is closed to obstacle, blue is far, yellow is gradient direction
"""
p_max, p_min = (
np.amax(vis_pts[..., 6], axis=(-2, -1))[..., None, None],
np.amin(vis_pts[..., 6], axis=(-2, -1))[..., None, None],
)
vis_pts[..., 6] = 255 * safe_div(
vis_pts[..., 6] - p_min, (p_max - p_min + 1e-8)
)
vis_pts[..., 7] = 255 - vis_pts[..., 6]
if type(collide) is torch.Tensor:
collide = collide.detach().cpu().numpy()
        collide = collide.astype(bool)
vis_pts[collide, 6:9] = 255, 0, 0
def compute_point_jacobian(
self, joint_origin, x, joint_axis, potentials, type="revolute"
):
"""
Compute jacobian transpose for each point on a link.
"""
p, n = x.shape[:2]
x = x.transpose([1, 0, 2])[:, :, None, :]
m = joint_axis.shape[1]
jacobian = np.zeros([n, p, m, 6])
jacobian[..., :3] = np.cross(
joint_axis[:, None, ...], x - joint_origin[:, None, ...]
)
jacobian[..., 3:] = joint_axis[:, None, ...]
if type == "prsimatic": # finger joint
jacobian[..., -1, :3] = joint_axis[:, [-1], :]
jacobian[..., -1, 3:] = 0
return jacobian
def forward_kinematics_obstacle(self, xi, start, end, arc_length=True):
"""
Instead of computing C space velocity and using Jacobian,
we differentiate workspace positions for velocity and acceleration.
"""
robot = self.env.robot
robot_pts = robot.collision_points.transpose([0, 2, 1]) # m x 3 x p
n, m = xi.shape[0], xi.shape[1]
p = robot_pts.shape[2]
vis_pts = np.zeros([n, m + 1, p, 12])
Js = [] # (m + 1) x n x p x j x 6
(
robot_poses,
joint_origins,
joint_axis,
) = self.env.robot.robot_kinematics.forward_kinematics_parallel(
wrap_values(xi), return_joint_info=True
)
ws_positions = self.forward_points(
robot_poses, robot_pts
) # p x (m + 1) x n x 3
potentials, potential_grads, collide = self.compute_obstacle_cost_layer(
torch.from_numpy(ws_positions).cuda().float().permute(2, 1, 0, 3),
vis_pts,
uncheck_finger_collision=self.cfg.uncheck_finger_collision,
special_check_id=self.env.target_idx,
)
potentials = potentials.detach().cpu().numpy()
potential_grads = potential_grads.detach().cpu().numpy()
collide = collide.detach().cpu().numpy()
self.color_point(vis_pts, collide)
""" compute per point jacobian """
for j in range(m + 1):
Js.append(
self.compute_point_jacobian(
joint_origins[:, wrap_joint(j + 1)],
ws_positions[:, j],
joint_axis[:, wrap_joint(j + 1)],
potentials[:, j],
"prsimatic" if j >= 8 else "revolute",
)
)
""" endpoint """
if arc_length:
robot_poses_start = self.forward_poses(wrap_value(start))[0]
robot_poses_end = self.forward_poses(wrap_value(end))[0]
ws_positions_start = self.forward_points(
robot_poses_start[None, ...], robot_pts)[:, :, 0]
ws_positions_end = self.forward_points(
robot_poses_end[None, ...], robot_pts)[:, :, 0]
""" get derivative """
ws_velocity = self.cfg.get_derivative(
ws_positions, ws_positions_start, ws_positions_end, 1
)
ws_acceleration = self.cfg.get_derivative(
ws_positions, ws_positions_start, ws_positions_end, 2
)
ws_velocity = ws_velocity.transpose([2, 1, 0, 3])
ws_acceleration = ws_acceleration.transpose(
[2, 1, 0, 3]
) # n x (m + 1) x p x 3
ws_positions = ws_positions.transpose([2, 1, 0, 3])
return (
ws_positions,
ws_velocity,
ws_acceleration,
Js,
potentials,
potential_grads,
vis_pts,
collide.sum(),
)
else:
return Js, potentials, potential_grads, collide.sum()
def batch_obstacle_cost(
self,
joints,
arc_length=-1,
only_collide=False,
special_check_id=0,
uncheck_finger_collision=-1,
start=None,
end=None,
):
"""
Compute obstacle cost given a batch of joints
"""
s = time.time()
robot_pts = self.env.robot.collision_points.transpose([0, 2, 1])
robot_poses = []
robot_poses = self.env.robot.robot_kinematics.forward_kinematics_parallel(
wrap_values(joints)
)
if self.cfg.report_time:
print("fk time:", time.time() - s)
ws_positions = self.forward_points(
robot_poses, robot_pts
) # p x (m + 1) x n x 3
ws_positions = torch.from_numpy(ws_positions).cuda().float()
vis_pts = np.zeros(
[ws_positions.shape[2], ws_positions.shape[1], ws_positions.shape[0], 12]
)
collision_check_func = self.compute_obstacle_cost_layer
s = time.time()
potentials, grad, collide = collision_check_func(
ws_positions.permute(2, 1, 0, 3),
vis_pts=vis_pts,
special_check_id=special_check_id,
uncheck_finger_collision=uncheck_finger_collision,
)
self.color_point(vis_pts, collide)
if self.cfg.report_time:
print("obstacle time:", time.time() - s)
s = time.time()
if arc_length > 0: # p x m x g x n x 3
ws_positions = ws_positions.reshape(
ws_positions.shape[0], ws_positions.shape[1], -1, arc_length, 3
)
ws_positions = ws_positions.permute(2, 1, 0, 3, 4) # g x m x p x n x 3
robot_poses_start = self.forward_poses(wrap_value(start))[0]
robot_poses_end = (
self.env.robot.robot_kinematics.forward_kinematics_parallel(
wrap_values(end)
)
)
robot_poses_start = np.tile(
robot_poses_start, (robot_poses_end.shape[0], 1, 1, 1)
)
ws_positions_start = self.forward_points(
robot_poses_start, robot_pts
) # p x m x g x 3
ws_positions_end = self.forward_points(robot_poses_end, robot_pts)
ws_positions_start = (
torch.from_numpy(ws_positions_start).cuda().float().permute(2, 1, 0, 3)
)
ws_positions_end = (
torch.from_numpy(ws_positions_end).cuda().float().permute(2, 1, 0, 3)
)
ws_velocities = self.cfg.get_derivative_torch(
ws_positions, ws_positions_start, ws_positions_end
)
ws_velocities = ws_velocities.transpose(1, 3) # g x n x p x m x 3
ws_velocities = torch.norm(
ws_velocities.reshape(
[
-1,
ws_velocities.shape[2],
ws_velocities.shape[3],
ws_velocities.shape[4],
]
),
dim=-1,
).transpose(-1, -2)
potentials = potentials * ws_velocities # (g x n) x m x p
if self.cfg.report_time:
print("arc length time:", time.time() - s)
if only_collide:
collide_mask = (
potentials
> 0.5 * (self.cfg.epsilon - self.cfg.clearance) ** 2 / self.cfg.epsilon
).any()
potentials = potentials * collide_mask
return potentials, grad, vis_pts, collide
def compute_obstacle_cost_layer(
self,
ws_positions,
vis_pts=None,
special_check_id=0,
uncheck_finger_collision=-1,
grad_free=True,
):
"""
Compute obstacle cost and gradient from sdf, take in torch cuda tensor
"""
# prepare data
n, m, p, _ = ws_positions.shape
points = ws_positions.reshape([-1, 3])
num_objects = len(self.env.objects)
poses = np.zeros((num_objects, 4, 4), dtype=np.float32)
epsilons = np.zeros((num_objects,), dtype=np.float32)
padding_scales = np.zeros((num_objects,), dtype=np.float32)
clearances = np.zeros((num_objects,), dtype=np.float32)
disables = np.zeros((num_objects,), dtype=np.float32)
for idx, obs in enumerate(self.env.objects):
if obs.name == "floor" or obs.name in self.cfg.disable_collision_set:
disables[idx] = 1
padding_scale = 1
eps = self.cfg.epsilon
clearances[idx] = self.cfg.clearance
epsilons[idx] = eps
padding_scales[idx] = padding_scale
poses[idx] = se3_inverse(obs.pose_mat)
# forward layer
poses = torch.from_numpy(poses).cuda()
epsilons = torch.from_numpy(epsilons).cuda()
clearances = torch.from_numpy(clearances).cuda()
padding_scales = torch.from_numpy(padding_scales).cuda()
disables = torch.from_numpy(disables).cuda()
potentials, potential_grads, collides = self.sdf_loss(
poses,
self.env.sdf_torch,
self.env.sdf_limits,
points,
epsilons,
padding_scales,
clearances,
disables,
)
potentials = potentials.reshape([n, m, p])
potential_grads = potential_grads.reshape([n, m, p, 3])
collides = collides.reshape([n, m, p])
if self.cfg.use_standoff and self.cfg.goal_set_proj:
potentials[-self.cfg.reach_tail_length :] = 0
potential_grads[-self.cfg.reach_tail_length :] = 0
collides[-self.cfg.reach_tail_length :] = 0
if uncheck_finger_collision == -1:
potentials[:, -2:] *= 0.1 # soft
potential_grads[:, -2:] *= 0.1 # soft
collides[:, -2:] = 0
if vis_pts is not None:
vis_pts[:, :m, :, :3] = points.reshape([n, m, p, 3]).detach().cpu().numpy()
vis_pts[:, :m:, :, 6] = potentials.detach().cpu().numpy()
vis_pts[:, :m:, :, 9:] = potential_grads.detach().cpu().numpy()
return potentials, potential_grads, collides
def compute_collision_loss(self, xi, start, end):
"""
Computes obstacle loss
"""
n, m = xi.shape[0], xi.shape[1]
obs_grad = np.zeros_like(xi)
obs_cost = np.zeros([n, m + 1])
(
x,
v,
a,
Js,
potentials,
potential_grads,
vis_pts,
collide,
) = self.forward_kinematics_obstacle(xi, start, end)
if self.cfg.top_k_collision == 0:
for j in range(m + 1):
J = np.array(Js[j])[..., :3]
obs_cost_i, obs_grad_i = self.functional_grad(
v[:, j], a[:, j], J, potentials[:, j], potential_grads[:, j]
)
obs_grad_i = obs_grad_i.sum(1)
obs_cost[:, j] += obs_cost_i
obs_grad[:, wrap_index(j + 1)] += obs_grad_i.reshape([n, -1])
else:
# top k collision points in the whole trajectory
topk = np.unravel_index(np.argsort(potentials.flatten()), potentials.shape)
top_n, top_m, top_p = (
topk[0][-self.cfg.top_k_collision :],
topk[1][-self.cfg.top_k_collision :],
topk[2][-self.cfg.top_k_collision :],
)
top_potentials = potentials[top_n, top_m, top_p]
vis_pts[top_n, top_m, top_p, 6:9] = [235, 52, 195]
if not self.cfg.consider_finger:
m = m - 2
for j in range(m + 1):
if np.isin(j, top_m):
mask = j == top_m
select_n, select_p = top_n[mask], top_p[mask]
J = np.array(Js[j])[select_n, select_p, :, :3]
obs_cost_i, obs_grad_i = self.functional_grad(
v[select_n, j, select_p],
a[select_n, j, select_p],
J,
potentials[select_n, j, select_p],
potential_grads[select_n, j, select_p],
)
obs_cost[:, j] += obs_cost_i
select_m = wrap_index(j + 1)
select_n, select_m = np.repeat(select_n, len(select_m)), np.tile(
select_m, len(select_n)
)
obs_grad[select_n, select_m] += obs_grad_i.flatten()
return obs_cost, obs_grad, vis_pts, collide.sum()
def compute_smooth_loss(self, xi, start, end):
"""
Computes smoothness loss
"""
link_smooth_weight = np.array(self.cfg.link_smooth_weight)[None]
ed = np.zeros([xi.shape[0] + 1, xi.shape[1]])
ed[0] = (
self.cfg.diff_rule[0][self.cfg.diff_rule_length // 2 - 1]
* start
/ self.cfg.time_interval
)
if not self.cfg.goal_set_proj:
ed[-1] = (
self.cfg.diff_rule[0][self.cfg.diff_rule_length // 2]
* end
/ self.cfg.time_interval
)
        velocity = self.cfg.diff_matrices[0].dot(xi)
        velocity_norm = np.linalg.norm((velocity + ed) * link_smooth_weight, axis=1)
        smoothness_loss = 0.5 * velocity_norm ** 2
smoothness_grad = self.cfg.A.dot(xi) + self.cfg.diff_matrices[0].T.dot(ed)
smoothness_grad *= link_smooth_weight
return smoothness_loss, smoothness_grad
def compute_total_loss(self, traj):
"""
Compute total losses, gradients, and other info
"""
smoothness_loss, smoothness_grad = self.compute_smooth_loss(
traj.data, traj.start, traj.end
)
(
obstacle_loss,
obstacle_grad,
collision_pts,
collide,
) = self.compute_collision_loss(traj.data, traj.start, traj.end)
smoothness_loss_sum = smoothness_loss.sum()
obstacle_loss_sum = obstacle_loss.sum()
weighted_obs = self.cfg.obstacle_weight * obstacle_loss_sum
weighted_smooth = self.cfg.smoothness_weight * smoothness_loss_sum
weighted_obs_grad = self.cfg.obstacle_weight * obstacle_grad
weighted_obs_grad = np.clip(
weighted_obs_grad, -self.cfg.clip_grad_scale, self.cfg.clip_grad_scale
)
weighted_smooth_grad = self.cfg.smoothness_weight * smoothness_grad
cost = weighted_obs + weighted_smooth
grad = weighted_obs_grad + weighted_smooth_grad
cost_traj = (
self.cfg.obstacle_weight * obstacle_loss.sum(-1)
+ self.cfg.smoothness_weight * smoothness_loss[:-1]
)
goal_dist = (
np.linalg.norm(traj.data[-1] - traj.goal_set[traj.goal_idx])
if self.cfg.goal_set_proj
else 0
)
terminate = (
(collide <= self.cfg.allow_collision_point)
and self.cfg.pre_terminate
and (goal_dist < 0.01)
and smoothness_loss_sum < self.cfg.terminate_smooth_loss
)
execute = (collide <= self.cfg.allow_collision_point) and (
smoothness_loss_sum < self.cfg.terminate_smooth_loss
)
standoff_idx = (
len(traj.data) - self.cfg.reach_tail_length
if self.cfg.use_standoff
else len(traj.data) - 1
)
info = {
"collision_pts": collision_pts,
"obs": obstacle_loss_sum,
"smooth": smoothness_loss_sum,
"grasp": 0,
"weighted_obs": weighted_obs,
"weighted_smooth": weighted_smooth,
"weighted_smooth_grad": np.linalg.norm(weighted_smooth_grad),
"weighted_obs_grad": np.linalg.norm(weighted_obs_grad),
"weighted_grasp_grad": 0,
"weighted_grasp": 0,
"gradient": grad,
"cost": cost,
"grad": np.linalg.norm(grad),
"terminate": terminate,
"collide": collide,
"standoff_idx": standoff_idx,
"reach": goal_dist,
"execute": execute,
"cost_traj": cost_traj,
}
return cost, grad, info
|
the-stack_106_30142 | # Write a Python Program to implement your own myreduce() function which works exactly
# like Python's built-in function reduce()
def myreduce(fnc, seq):
count = seq[0]
for next in seq[1:]:
count = fnc(count, next)
return count
myreduce( (lambda x, y: x + y), [1, 2, 3, 4, 5])
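# Quick sanity check (illustrative): myreduce should agree with functools.reduce
# for the same function and sequence.
from functools import reduce
assert myreduce(lambda x, y: x + y, [1, 2, 3, 4, 5]) == 15
assert myreduce(lambda x, y: x * y, [1, 2, 3, 4]) == reduce(lambda x, y: x * y, [1, 2, 3, 4]) == 24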
|
the-stack_106_30143 | from textwrap import dedent
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._core import (
VectorPlotter,
)
from .utils import (
ci_to_errsize,
locator_to_legend_entries,
ci as ci_func
)
from .algorithms import bootstrap
from .axisgrid import FacetGrid, _facet_docs
from ._decorators import _deprecate_positional_args
__all__ = ["relplot", "scatterplot", "lineplot"]
class _RelationalPlotter(VectorPlotter):
wide_structure = {
"x": "index", "y": "values", "hue": "columns", "style": "columns",
}
# TODO where best to define default parameters?
sort = True
def label_axes(self, ax):
"""Set x and y labels with visibility that matches the ticklabels."""
if "x" in self.variables and self.variables["x"] is not None:
x_visible = any(t.get_visible() for t in ax.get_xticklabels())
ax.set_xlabel(self.variables["x"], visible=x_visible)
if "y" in self.variables and self.variables["y"] is not None:
y_visible = any(t.get_visible() for t in ax.get_yticklabels())
ax.set_ylabel(self.variables["y"], visible=y_visible)
def add_legend_data(self, ax):
"""Add labeled artists to represent the different plot semantics."""
verbosity = self.legend
if verbosity not in ["brief", "full"]:
err = "`legend` must be 'brief', 'full', or False"
raise ValueError(err)
legend_kwargs = {}
keys = []
title_kws = dict(color="w", s=0, linewidth=0, marker="", dashes="")
def update(var_name, val_name, **kws):
key = var_name, val_name
if key in legend_kwargs:
legend_kwargs[key].update(**kws)
else:
keys.append(key)
legend_kwargs[key] = dict(**kws)
# -- Add a legend for hue semantics
if verbosity == "brief" and self._hue_map.map_type == "numeric":
if isinstance(self._hue_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=3)
else:
locator = mpl.ticker.MaxNLocator(nbins=3)
limits = min(self._hue_map.levels), max(self._hue_map.levels)
hue_levels, hue_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["hue"].dtype
)
elif self._hue_map.levels is None:
hue_levels = hue_formatted_levels = []
else:
hue_levels = hue_formatted_levels = self._hue_map.levels
# Add the hue semantic subtitle
if "hue" in self.variables and self.variables["hue"] is not None:
update((self.variables["hue"], "title"),
self.variables["hue"], **title_kws)
# Add the hue semantic labels
for level, formatted_level in zip(hue_levels, hue_formatted_levels):
if level is not None:
color = self._hue_map(level)
update(self.variables["hue"], formatted_level, color=color)
# -- Add a legend for size semantics
if verbosity == "brief" and self._size_map.map_type == "numeric":
# Define how ticks will interpolate between the min/max data values
if isinstance(self._size_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=3)
else:
locator = mpl.ticker.MaxNLocator(nbins=3)
# Define the min/max data values
limits = min(self._size_map.levels), max(self._size_map.levels)
size_levels, size_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["size"].dtype
)
elif self._size_map.levels is None:
size_levels = size_formatted_levels = []
else:
size_levels = size_formatted_levels = self._size_map.levels
# Add the size semantic subtitle
if "size" in self.variables and self.variables["size"] is not None:
update((self.variables["size"], "title"),
self.variables["size"], **title_kws)
# Add the size semantic labels
for level, formatted_level in zip(size_levels, size_formatted_levels):
if level is not None:
size = self._size_map(level)
update(
self.variables["size"],
formatted_level,
linewidth=size,
s=size,
)
# -- Add a legend for style semantics
# Add the style semantic title
if "style" in self.variables and self.variables["style"] is not None:
update((self.variables["style"], "title"),
self.variables["style"], **title_kws)
# Add the style semantic labels
if self._style_map.levels is not None:
for level in self._style_map.levels:
if level is not None:
attrs = self._style_map(level)
update(
self.variables["style"],
level,
marker=attrs.get("marker", ""),
dashes=attrs.get("dashes", ""),
)
func = getattr(ax, self._legend_func)
legend_data = {}
legend_order = []
for key in keys:
_, label = key
kws = legend_kwargs[key]
kws.setdefault("color", ".2")
use_kws = {}
for attr in self._legend_attributes + ["visible"]:
if attr in kws:
use_kws[attr] = kws[attr]
artist = func([], [], label=label, **use_kws)
if self._legend_func == "plot":
artist = artist[0]
legend_data[key] = artist
legend_order.append(key)
self.legend_data = legend_data
self.legend_order = legend_order
class _LinePlotter(_RelationalPlotter):
_legend_attributes = ["color", "linewidth", "marker", "dashes"]
_legend_func = "plot"
def __init__(
self, *,
data=None, variables={},
estimator=None, ci=None, n_boot=None, seed=None,
sort=True, err_style=None, err_kws=None, legend=None
):
        # TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
)
super().__init__(data=data, variables=variables)
self.estimator = estimator
self.ci = ci
self.n_boot = n_boot
self.seed = seed
self.sort = sort
self.err_style = err_style
self.err_kws = {} if err_kws is None else err_kws
self.legend = legend
def aggregate(self, vals, grouper, units=None):
"""Compute an estimate and confidence interval using grouper."""
func = self.estimator
ci = self.ci
n_boot = self.n_boot
seed = self.seed
# Define a "null" CI for when we only have one value
        null_ci = pd.Series(index=["low", "high"], dtype=float)
# Function to bootstrap in the context of a pandas group by
def bootstrapped_cis(vals):
if len(vals) <= 1:
return null_ci
boots = bootstrap(vals, func=func, n_boot=n_boot, seed=seed)
cis = ci_func(boots, ci)
return pd.Series(cis, ["low", "high"])
# Group and get the aggregation estimate
grouped = vals.groupby(grouper, sort=self.sort)
est = grouped.agg(func)
# Exit early if we don't want a confidence interval
if ci is None:
return est.index, est, None
# Compute the error bar extents
if ci == "sd":
sd = grouped.std()
cis = pd.DataFrame(np.c_[est - sd, est + sd],
index=est.index,
columns=["low", "high"]).stack()
else:
cis = grouped.apply(bootstrapped_cis)
# Unpack the CIs into "wide" format for plotting
if cis.notnull().any():
cis = cis.unstack().reindex(est.index)
else:
cis = None
return est.index, est, cis
def plot(self, ax, kws):
"""Draw the plot onto an axes, passing matplotlib kwargs."""
# Draw a test plot, using the passed in kwargs. The goal here is to
# honor both (a) the current state of the plot cycler and (b) the
# specified kwargs on all the lines we will draw, overriding when
# relevant with the data semantics. Note that we won't cycle
# internally; in other words, if ``hue`` is not used, all elements will
# have the same color, but they will have the color that you would have
# gotten from the corresponding matplotlib function, and calling the
# function will advance the axes property cycle.
scout, = ax.plot([], [], **kws)
orig_color = kws.pop("color", scout.get_color())
orig_marker = kws.pop("marker", scout.get_marker())
orig_linewidth = kws.pop("linewidth",
kws.pop("lw", scout.get_linewidth()))
        # Note that scout.get_linestyle() is not correct as of mpl 3.2
orig_linestyle = kws.pop("linestyle", kws.pop("ls", None))
kws.setdefault("markeredgewidth", kws.pop("mew", .75))
kws.setdefault("markeredgecolor", kws.pop("mec", "w"))
scout.remove()
# Set default error kwargs
err_kws = self.err_kws.copy()
if self.err_style == "band":
err_kws.setdefault("alpha", .2)
elif self.err_style == "bars":
pass
elif self.err_style is not None:
err = "`err_style` must be 'band' or 'bars', not {}"
raise ValueError(err.format(self.err_style))
# Set the default artist keywords
kws.update(dict(
color=orig_color,
marker=orig_marker,
linewidth=orig_linewidth,
linestyle=orig_linestyle,
))
# Loop over the semantic subsets and add to the plot
grouping_semantics = "hue", "size", "style"
for sub_vars, sub_data in self._semantic_subsets(
grouping_semantics, from_comp_data=True
):
if self.sort:
sort_vars = ["units", "x", "y"]
sort_cols = [var for var in sort_vars if var in self.variables]
sub_data = sub_data.sort_values(sort_cols)
# Due to the original design, code below was written assuming that
# sub_data always has x, y, and units columns, which may be empty.
# Adding this here to avoid otherwise disruptive changes, but it
# could get removed if the rest of the logic is sorted out
null = pd.Series(index=sub_data.index, dtype=float)
x = sub_data.get("x", null)
y = sub_data.get("y", null)
u = sub_data.get("units", null)
if self.estimator is not None:
if "units" in self.variables:
err = "estimator must be None when specifying units"
raise ValueError(err)
x, y, y_ci = self.aggregate(y, x, u)
else:
y_ci = None
if "hue" in sub_vars:
kws["color"] = self._hue_map(sub_vars["hue"])
if "size" in sub_vars:
kws["linewidth"] = self._size_map(sub_vars["size"])
if "style" in sub_vars:
attributes = self._style_map(sub_vars["style"])
if "dashes" in attributes:
kws["dashes"] = attributes["dashes"]
if "marker" in attributes:
kws["marker"] = attributes["marker"]
line, = ax.plot([], [], **kws)
line_color = line.get_color()
line_alpha = line.get_alpha()
line_capstyle = line.get_solid_capstyle()
line.remove()
# --- Draw the main line
x, y = np.asarray(x), np.asarray(y)
if "units" in self.variables:
for u_i in u.unique():
rows = np.asarray(u == u_i)
ax.plot(x[rows], y[rows], **kws)
else:
line, = ax.plot(x, y, **kws)
# --- Draw the confidence intervals
if y_ci is not None:
low, high = np.asarray(y_ci["low"]), np.asarray(y_ci["high"])
if self.err_style == "band":
ax.fill_between(x, low, high, color=line_color, **err_kws)
elif self.err_style == "bars":
y_err = ci_to_errsize((low, high), y)
ebars = ax.errorbar(x, y, y_err, linestyle="",
color=line_color, alpha=line_alpha,
**err_kws)
# Set the capstyle properly on the error bars
for obj in ebars.get_children():
try:
obj.set_capstyle(line_capstyle)
except AttributeError:
# Does not exist on mpl < 2.2
pass
# Finalize the axes details
self.label_axes(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
ax.legend()
class _ScatterPlotter(_RelationalPlotter):
_legend_attributes = ["color", "s", "marker"]
_legend_func = "scatter"
def __init__(
self, *,
data=None, variables={},
x_bins=None, y_bins=None,
estimator=None, ci=None, n_boot=None,
alpha=None, x_jitter=None, y_jitter=None,
legend=None
):
        # TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
)
super().__init__(data=data, variables=variables)
self.alpha = alpha
self.legend = legend
def plot(self, ax, kws):
# Draw a test plot, using the passed in kwargs. The goal here is to
# honor both (a) the current state of the plot cycler and (b) the
# specified kwargs on all the lines we will draw, overriding when
# relevant with the data semantics. Note that we won't cycle
# internally; in other words, if ``hue`` is not used, all elements will
# have the same color, but they will have the color that you would have
# gotten from the corresponding matplotlib function, and calling the
# function will advance the axes property cycle.
scout_size = max(
np.atleast_1d(kws.get("s", [])).shape[0],
np.atleast_1d(kws.get("c", [])).shape[0],
)
scout_x = scout_y = np.full(scout_size, np.nan)
scout = ax.scatter(scout_x, scout_y, **kws)
s = kws.pop("s", scout.get_sizes())
c = kws.pop("c", scout.get_facecolors())
scout.remove()
kws.pop("color", None) # TODO is this optimal?
# --- Determine the visual attributes of the plot
data = self.plot_data[list(self.variables)].dropna()
if not data.size:
return
# Define the vectors of x and y positions
empty = np.full(len(data), np.nan)
x = data.get("x", empty)
y = data.get("y", empty)
        # Apply the mapping from semantic variables to artist attributes
if "hue" in self.variables:
c = self._hue_map(data["hue"])
if "size" in self.variables:
s = self._size_map(data["size"])
        # Set defaults for other visual attributes
kws.setdefault("linewidth", .08 * np.sqrt(np.percentile(s, 10)))
kws.setdefault("edgecolor", "w")
if "style" in self.variables:
# Use a representative marker so scatter sets the edgecolor
# properly for line art markers. We currently enforce either
# all or none line art so this works.
example_level = self._style_map.levels[0]
example_marker = self._style_map(example_level, "marker")
kws.setdefault("marker", example_marker)
# TODO this makes it impossible to vary alpha with hue which might
# otherwise be useful? Should we just pass None?
kws["alpha"] = 1 if self.alpha == "auto" else self.alpha
# Draw the scatter plot
args = np.asarray(x), np.asarray(y), np.asarray(s), np.asarray(c)
points = ax.scatter(*args, **kws)
# Update the paths to get different marker shapes.
# This has to be done here because ax.scatter allows varying sizes
# and colors but only a single marker shape per call.
if "style" in self.variables:
p = [self._style_map(val, "path") for val in data["style"]]
points.set_paths(p)
# Finalize the axes details
self.label_axes(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
ax.legend()
_relational_docs = dict(
# --- Introductory prose
main_api_narrative=dedent("""\
The relationship between ``x`` and ``y`` can be shown for different subsets
of the data using the ``hue``, ``size``, and ``style`` parameters. These
parameters control what visual semantics are used to identify the different
subsets. It is possible to show up to three dimensions independently by
using all three semantic types, but this style of plot can be hard to
interpret and is often ineffective. Using redundant semantics (i.e. both
``hue`` and ``style`` for the same variable) can be helpful for making
graphics more accessible.
See the :ref:`tutorial <relational_tutorial>` for more information.\
"""),
relational_semantic_narrative=dedent("""\
The default treatment of the ``hue`` (and to a lesser extent, ``size``)
semantic, if present, depends on whether the variable is inferred to
represent "numeric" or "categorical" data. In particular, numeric variables
are represented with a sequential colormap by default, and the legend
entries show regular "ticks" with values that may or may not exist in the
data. This behavior can be controlled through various parameters, as
described and illustrated below.\
"""),
# --- Shared function parameters
data_vars=dedent("""\
x, y : names of variables in ``data`` or vector data, optional
Input data variables; must be numeric. Can pass data directly or
reference columns in ``data``.\
"""),
data=dedent("""\
data : DataFrame, array, or list of arrays, optional
Input data structure. If ``x`` and ``y`` are specified as names, this
should be a "long-form" DataFrame containing those columns. Otherwise
it is treated as "wide-form" data and grouping variables are ignored.
See the examples for the various ways this parameter can be specified
and the different effects of each.\
"""),
palette=dedent("""\
palette : string, list, dict, or matplotlib colormap
An object that determines how colors are chosen when ``hue`` is used.
It can be the name of a seaborn palette or matplotlib colormap, a list
of colors (anything matplotlib understands), a dict mapping levels
of the ``hue`` variable to colors, or a matplotlib colormap object.\
"""),
hue_order=dedent("""\
hue_order : list, optional
Specified order for the appearance of the ``hue`` variable levels,
otherwise they are determined from the data. Not relevant when the
``hue`` variable is numeric.\
"""),
hue_norm=dedent("""\
hue_norm : tuple or Normalize object, optional
Normalization in data units for colormap applied to the ``hue``
variable when it is numeric. Not relevant if it is categorical.\
"""),
sizes=dedent("""\
sizes : list, dict, or tuple, optional
An object that determines how sizes are chosen when ``size`` is used.
It can always be a list of size values or a dict mapping levels of the
``size`` variable to sizes. When ``size`` is numeric, it can also be
a tuple specifying the minimum and maximum size to use such that other
values are normalized within this range.\
"""),
size_order=dedent("""\
size_order : list, optional
Specified order for appearance of the ``size`` variable levels,
otherwise they are determined from the data. Not relevant when the
``size`` variable is numeric.\
"""),
size_norm=dedent("""\
size_norm : tuple or Normalize object, optional
Normalization in data units for scaling plot objects when the
``size`` variable is numeric.\
"""),
markers=dedent("""\
markers : boolean, list, or dictionary, optional
Object determining how to draw the markers for different levels of the
``style`` variable. Setting to ``True`` will use default markers, or
you can pass a list of markers or a dictionary mapping levels of the
``style`` variable to markers. Setting to ``False`` will draw
marker-less lines. Markers are specified as in matplotlib.\
"""),
style_order=dedent("""\
style_order : list, optional
Specified order for appearance of the ``style`` variable levels
otherwise they are determined from the data. Not relevant when the
``style`` variable is numeric.\
"""),
units=dedent("""\
units : {long_form_var}
Grouping variable identifying sampling units. When used, a separate
line will be drawn for each unit with appropriate semantics, but no
legend entry will be added. Useful for showing distribution of
experimental replicates when exact identities are not needed.
"""),
estimator=dedent("""\
estimator : name of pandas method or callable or None, optional
Method for aggregating across multiple observations of the ``y``
variable at the same ``x`` level. If ``None``, all observations will
be drawn.\
"""),
ci=dedent("""\
ci : int or "sd" or None, optional
Size of the confidence interval to draw when aggregating with an
estimator. "sd" means to draw the standard deviation of the data.
Setting to ``None`` will skip bootstrapping.\
"""),
n_boot=dedent("""\
n_boot : int, optional
Number of bootstraps to use for computing the confidence interval.\
"""),
seed=dedent("""\
seed : int, numpy.random.Generator, or numpy.random.RandomState, optional
Seed or random number generator for reproducible bootstrapping.\
"""),
legend=dedent("""\
legend : "brief", "full", or False, optional
How to draw the legend. If "brief", numeric ``hue`` and ``size``
variables will be represented with a sample of evenly spaced values.
If "full", every group will get an entry in the legend. If ``False``,
no legend data is added and no legend is drawn.\
"""),
ax_in=dedent("""\
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.\
"""),
ax_out=dedent("""\
ax : matplotlib Axes
Returns the Axes object with the plot drawn onto it.\
"""),
# --- Repeated phrases
long_form_var="name of variables in ``data`` or vector data, optional",
)
_relational_docs.update(_facet_docs)
@_deprecate_positional_args
def lineplot(
*,
x=None, y=None,
hue=None, size=None, style=None,
data=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
dashes=True, markers=None, style_order=None,
units=None, estimator="mean", ci=95, n_boot=1000, seed=None,
sort=True, err_style="band", err_kws=None,
legend="brief", ax=None, **kwargs
):
variables = _LinePlotter.get_semantics(locals())
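    # Construct the plotter; the map_* calls below resolve the semantic
    # mappings and p.plot() does the actual drawing.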
p = _LinePlotter(
data=data, variables=variables,
estimator=estimator, ci=ci, n_boot=n_boot, seed=seed,
sort=sort, err_style=err_style, err_kws=err_kws, legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
if ax is None:
ax = plt.gca()
if not p.has_xy_data:
return ax
p._attach(ax)
p.plot(ax, kwargs)
return ax
lineplot.__doc__ = dedent("""\
Draw a line plot with possibility of several semantic groupings.
{main_api_narrative}
{relational_semantic_narrative}
By default, the plot aggregates over multiple ``y`` values at each value of
``x`` and shows an estimate of the central tendency and a confidence
interval for that estimate.
Parameters
----------
{data_vars}
hue : {long_form_var}
Grouping variable that will produce lines with different colors.
Can be either categorical or numeric, although color mapping will
        behave differently in the latter case.
size : {long_form_var}
Grouping variable that will produce lines with different widths.
Can be either categorical or numeric, although size mapping will
        behave differently in the latter case.
style : {long_form_var}
Grouping variable that will produce lines with different dashes
and/or markers. Can have a numeric dtype but will always be treated
as categorical.
{data}
{palette}
{hue_order}
{hue_norm}
{sizes}
{size_order}
{size_norm}
dashes : boolean, list, or dictionary, optional
Object determining how to draw the lines for different levels of the
``style`` variable. Setting to ``True`` will use default dash codes, or
you can pass a list of dash codes or a dictionary mapping levels of the
``style`` variable to dash codes. Setting to ``False`` will use solid
lines for all subsets. Dashes are specified as in matplotlib: a tuple
of ``(segment, gap)`` lengths, or an empty string to draw a solid line.
{markers}
{style_order}
{units}
{estimator}
{ci}
{n_boot}
{seed}
sort : boolean, optional
If True, the data will be sorted by the x and y variables, otherwise
lines will connect points in the order they appear in the dataset.
err_style : "band" or "bars", optional
Whether to draw the confidence intervals with translucent error bands
or discrete error bars.
err_kws : dict of keyword arguments
        Additional parameters to control the aesthetics of the error bars. The
kwargs are passed either to :meth:`matplotlib.axes.Axes.fill_between`
or :meth:`matplotlib.axes.Axes.errorbar`, depending on ``err_style``.
{legend}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{ax_out}
See Also
--------
scatterplot : Show the relationship between two variables without
emphasizing continuity of the ``x`` variable.
pointplot : Show the relationship between two variables when one is
categorical.
Examples
--------
Draw a single line plot with error bands showing a confidence interval:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> import matplotlib.pyplot as plt
>>> fmri = sns.load_dataset("fmri")
>>> ax = sns.lineplot(x="timepoint", y="signal", data=fmri)
Group by another variable and show the groups with different colors:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal", hue="event",
... data=fmri)
Show the grouping variable with both color and line dashing:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal",
... hue="event", style="event", data=fmri)
Use color and line dashing to represent two different grouping variables:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal",
... hue="region", style="event", data=fmri)
Use markers instead of the dashes to identify groups:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal",
... hue="event", style="event",
... markers=True, dashes=False, data=fmri)
Show error bars instead of error bands and plot the standard error:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal", hue="event",
... err_style="bars", ci=68, data=fmri)
Show experimental replicates instead of aggregating:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal", hue="event",
... units="subject", estimator=None, lw=1,
... data=fmri.query("region == 'frontal'"))
Use a quantitative color mapping:
.. plot::
:context: close-figs
>>> dots = sns.load_dataset("dots").query("align == 'dots'")
>>> ax = sns.lineplot(x="time", y="firing_rate",
... hue="coherence", style="choice",
... data=dots)
Use a different normalization for the colormap:
.. plot::
:context: close-figs
>>> from matplotlib.colors import LogNorm
>>> ax = sns.lineplot(x="time", y="firing_rate",
... hue="coherence", style="choice",
... hue_norm=LogNorm(),
... data=dots.query("coherence > 0"))
Use a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="time", y="firing_rate",
... hue="coherence", style="choice",
... palette="ch:2.5,.25", data=dots)
Use specific color values, treating the hue variable as categorical:
.. plot::
:context: close-figs
>>> palette = sns.color_palette("mako_r", 6)
>>> ax = sns.lineplot(x="time", y="firing_rate",
... hue="coherence", style="choice",
... palette=palette, data=dots)
Change the width of the lines with a quantitative variable:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="time", y="firing_rate",
... size="coherence", hue="choice",
... legend="full", data=dots)
Change the range of line widths used to normalize the size variable:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="time", y="firing_rate",
... size="coherence", hue="choice",
... sizes=(.25, 2.5), data=dots)
Plot from a wide-form DataFrame:
.. plot::
:context: close-figs
>>> import numpy as np, pandas as pd; plt.close("all")
>>> index = pd.date_range("1 1 2000", periods=100,
... freq="m", name="date")
>>> data = np.random.randn(100, 4).cumsum(axis=0)
>>> wide_df = pd.DataFrame(data, index, ["a", "b", "c", "d"])
>>> ax = sns.lineplot(data=wide_df)
Plot from a list of Series:
.. plot::
:context: close-figs
>>> list_data = [wide_df.loc[:"2005", "a"], wide_df.loc["2003":, "b"]]
>>> ax = sns.lineplot(data=list_data)
Plot a single Series, pass kwargs to :meth:`matplotlib.axes.Axes.plot`:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(data=wide_df["a"], color="coral", label="line")
Draw lines at points as they appear in the dataset:
.. plot::
:context: close-figs
>>> x, y = np.random.randn(2, 5000).cumsum(axis=1)
>>> ax = sns.lineplot(x=x, y=y, sort=False, lw=1)
Use :func:`relplot` to combine :func:`lineplot` and :class:`FacetGrid`:
This allows grouping within additional categorical variables. Using
:func:`relplot` is safer than using :class:`FacetGrid` directly, as it
ensures synchronization of the semantic mappings across facets.
.. plot::
:context: close-figs
>>> g = sns.relplot(x="timepoint", y="signal",
... col="region", hue="event", style="event",
... kind="line", data=fmri)
""").format(**_relational_docs)
@_deprecate_positional_args
def scatterplot(
*,
x=None, y=None,
hue=None, style=None, size=None, data=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=True, style_order=None,
x_bins=None, y_bins=None,
units=None, estimator=None, ci=95, n_boot=1000,
alpha=None, x_jitter=None, y_jitter=None,
legend="brief", ax=None, **kwargs
):
variables = _ScatterPlotter.get_semantics(locals())
p = _ScatterPlotter(
data=data, variables=variables,
x_bins=x_bins, y_bins=y_bins,
estimator=estimator, ci=ci, n_boot=n_boot,
alpha=alpha, x_jitter=x_jitter, y_jitter=y_jitter, legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, order=style_order)
if ax is None:
ax = plt.gca()
if not p.has_xy_data:
return ax
p._attach(ax)
p.plot(ax, kwargs)
return ax
scatterplot.__doc__ = dedent("""\
Draw a scatter plot with possibility of several semantic groupings.
{main_api_narrative}
{relational_semantic_narrative}
Parameters
----------
{data_vars}
hue : {long_form_var}
Grouping variable that will produce points with different colors.
Can be either categorical or numeric, although color mapping will
        behave differently in the latter case.
size : {long_form_var}
Grouping variable that will produce points with different sizes.
Can be either categorical or numeric, although size mapping will
        behave differently in the latter case.
style : {long_form_var}
Grouping variable that will produce points with different markers.
Can have a numeric dtype but will always be treated as categorical.
{data}
{palette}
{hue_order}
{hue_norm}
{sizes}
{size_order}
{size_norm}
{markers}
{style_order}
{{x,y}}_bins : lists or arrays or functions
*Currently non-functional.*
{units}
*Currently non-functional.*
{estimator}
*Currently non-functional.*
{ci}
*Currently non-functional.*
{n_boot}
*Currently non-functional.*
alpha : float
Proportional opacity of the points.
{{x,y}}_jitter : booleans or floats
*Currently non-functional.*
{legend}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{ax_out}
See Also
--------
lineplot : Show the relationship between two variables connected with
lines to emphasize continuity.
swarmplot : Draw a scatter plot with one categorical variable, arranging
the points to show the distribution of values.
Examples
--------
Draw a simple scatter plot between two variables:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> import matplotlib.pyplot as plt
>>> tips = sns.load_dataset("tips")
>>> ax = sns.scatterplot(x="total_bill", y="tip", data=tips)
Group by another variable and show the groups with different colors:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip", hue="time",
... data=tips)
Show the grouping variable by varying both color and marker:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="time", style="time", data=tips)
Vary colors and markers to show two different grouping variables:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="day", style="time", data=tips)
Show a quantitative variable by varying the size of the points:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip", size="size",
... data=tips)
    Show the quantitative variable by also using continuous colors:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="size", size="size",
... data=tips)
Use a different continuous color map:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(dark=.3, light=.8, as_cmap=True)
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="size", size="size",
... palette=cmap,
... data=tips)
Change the minimum and maximum point size and show all sizes in legend:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(dark=.3, light=.8, as_cmap=True)
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="size", size="size",
... sizes=(20, 200), palette=cmap,
... legend="full", data=tips)
Use a narrower range of color map intensities:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(dark=.3, light=.8, as_cmap=True)
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="size", size="size",
... sizes=(20, 200), hue_norm=(0, 7),
... legend="full", data=tips)
Vary the size with a categorical variable, and use a different palette:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(dark=.3, light=.8, as_cmap=True)
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="day", size="smoker",
... palette="Set2",
... data=tips)
Use a specific set of markers:
.. plot::
:context: close-figs
>>> markers = {{"Lunch": "s", "Dinner": "X"}}
>>> ax = sns.scatterplot(x="total_bill", y="tip", style="time",
... markers=markers,
... data=tips)
Control plot attributes using matplotlib parameters:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... s=100, color=".2", marker="+",
... data=tips)
Pass data vectors instead of names in a data frame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.scatterplot(x=iris.sepal_length, y=iris.sepal_width,
... hue=iris.species, style=iris.species)
Pass a wide-form dataset and plot against its index:
.. plot::
:context: close-figs
>>> import numpy as np, pandas as pd; plt.close("all")
>>> index = pd.date_range("1 1 2000", periods=100,
... freq="m", name="date")
>>> data = np.random.randn(100, 4).cumsum(axis=0)
>>> wide_df = pd.DataFrame(data, index, ["a", "b", "c", "d"])
>>> ax = sns.scatterplot(data=wide_df)
Use :func:`relplot` to combine :func:`scatterplot` and :class:`FacetGrid`:
This allows grouping within additional categorical variables. Using
:func:`relplot` is safer than using :class:`FacetGrid` directly, as it
ensures synchronization of the semantic mappings across facets.
.. plot::
:context: close-figs
>>> g = sns.relplot(x="total_bill", y="tip",
... col="time", hue="day", style="day",
... kind="scatter", data=tips)
""").format(**_relational_docs)
@_deprecate_positional_args
def relplot(
*,
x=None, y=None,
hue=None, size=None, style=None, data=None,
row=None, col=None,
col_wrap=None, row_order=None, col_order=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=None, dashes=None, style_order=None,
legend="brief", kind="scatter",
height=5, aspect=1, facet_kws=None,
units=None,
**kwargs
):
if kind == "scatter":
plotter = _ScatterPlotter
func = scatterplot
markers = True if markers is None else markers
elif kind == "line":
plotter = _LinePlotter
func = lineplot
dashes = True if dashes is None else dashes
else:
err = "Plot kind {} not recognized".format(kind)
raise ValueError(err)
# Check for attempt to plot onto specific axes and warn
if "ax" in kwargs:
msg = (
"relplot is a figure-level function and does not accept "
"the ax= paramter. You may wish to try {}".format(kind + "plot")
)
warnings.warn(msg, UserWarning)
kwargs.pop("ax")
# Use the full dataset to map the semantics
p = plotter(
data=data,
variables=plotter.get_semantics(locals()),
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
# Extract the semantic mappings
if "hue" in p.variables:
palette = p._hue_map.lookup_table
hue_order = p._hue_map.levels
hue_norm = p._hue_map.norm
else:
palette = hue_order = hue_norm = None
if "size" in p.variables:
sizes = p._size_map.lookup_table
size_order = p._size_map.levels
size_norm = p._size_map.norm
if "style" in p.variables:
style_order = p._style_map.levels
if markers:
markers = {k: p._style_map(k, "marker") for k in style_order}
else:
markers = None
if dashes:
dashes = {k: p._style_map(k, "dashes") for k in style_order}
else:
dashes = None
else:
markers = dashes = style_order = None
# Now extract the data that would be used to draw a single plot
variables = p.variables
plot_data = p.plot_data
plot_semantics = p.semantics
# Define the common plotting parameters
plot_kws = dict(
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
sizes=sizes, size_order=size_order, size_norm=size_norm,
markers=markers, dashes=dashes, style_order=style_order,
legend=False,
)
plot_kws.update(kwargs)
if kind == "scatter":
plot_kws.pop("dashes")
# Define the named variables for plotting on each facet
plot_variables = {key: key for key in p.variables}
plot_kws.update(plot_variables)
# Add the grid semantics onto the plotter
grid_semantics = "row", "col"
p.semantics = plot_semantics + grid_semantics
p.assign_variables(
data=data,
variables=dict(
x=x, y=y,
hue=hue, size=size, style=style, units=units,
row=row, col=col,
),
)
# Pass the row/col variables to FacetGrid with their original
# names so that the axes titles render correctly
grid_kws = {v: p.variables.get(v, None) for v in grid_semantics}
full_data = p.plot_data.rename(columns=grid_kws)
# Set up the FacetGrid object
facet_kws = {} if facet_kws is None else facet_kws.copy()
facet_kws.update(grid_kws)
g = FacetGrid(
data=full_data,
col_wrap=col_wrap, row_order=row_order, col_order=col_order,
height=height, aspect=aspect, dropna=False,
**facet_kws
)
# Draw the plot
g.map_dataframe(func, **plot_kws)
# Label the axes
g.set_axis_labels(
variables.get("x", None), variables.get("y", None)
)
# Show the legend
if legend:
# Replace the original plot data so the legend uses
# numeric data with the correct type
p.plot_data = plot_data
p.add_legend_data(g.axes.flat[0])
if p.legend_data:
g.add_legend(legend_data=p.legend_data,
label_order=p.legend_order)
return g
relplot.__doc__ = dedent("""\
Figure-level interface for drawing relational plots onto a FacetGrid.
This function provides access to several different axes-level functions
that show the relationship between two variables with semantic mappings
of subsets. The ``kind`` parameter selects the underlying axes-level
function to use:
- :func:`scatterplot` (with ``kind="scatter"``; the default)
- :func:`lineplot` (with ``kind="line"``)
Extra keyword arguments are passed to the underlying function, so you
should refer to the documentation for each to see kind-specific options.
{main_api_narrative}
{relational_semantic_narrative}
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, and other parameters.
Parameters
----------
x, y : names of variables in ``data``
Input data variables; must be numeric.
hue : name in ``data``, optional
Grouping variable that will produce elements with different colors.
Can be either categorical or numeric, although color mapping will
    behave differently in the latter case.
size : name in ``data``, optional
Grouping variable that will produce elements with different sizes.
Can be either categorical or numeric, although size mapping will
    behave differently in the latter case.
style : name in ``data``, optional
Grouping variable that will produce elements with different styles.
Can have a numeric dtype but will always be treated as categorical.
{data}
row, col : names of variables in ``data``, optional
Categorical variables that will determine the faceting of the grid.
{col_wrap}
row_order, col_order : lists of strings, optional
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
{palette}
{hue_order}
{hue_norm}
{sizes}
{size_order}
{size_norm}
{legend}
kind : string, optional
Kind of plot to draw, corresponding to a seaborn relational plot.
Options are {{``scatter`` and ``line``}}.
{height}
{aspect}
facet_kws : dict, optional
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
g : :class:`FacetGrid`
Returns the :class:`FacetGrid` object with the plot on it for further
tweaking.
Examples
--------
Draw a single facet to use the :class:`FacetGrid` legend placement:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="ticks")
>>> tips = sns.load_dataset("tips")
>>> g = sns.relplot(x="total_bill", y="tip", hue="day", data=tips)
Facet on the columns with another variable:
.. plot::
:context: close-figs
>>> g = sns.relplot(x="total_bill", y="tip",
... hue="day", col="time", data=tips)
Facet on the columns and rows:
.. plot::
:context: close-figs
>>> g = sns.relplot(x="total_bill", y="tip", hue="day",
... col="time", row="sex", data=tips)
"Wrap" many column facets into multiple rows:
.. plot::
:context: close-figs
>>> g = sns.relplot(x="total_bill", y="tip", hue="time",
... col="day", col_wrap=2, data=tips)
Use multiple semantic variables on each facet with specified attributes:
.. plot::
:context: close-figs
>>> g = sns.relplot(x="total_bill", y="tip", hue="time", size="size",
... palette=["b", "r"], sizes=(10, 100),
... col="time", data=tips)
Use a different kind of plot:
.. plot::
:context: close-figs
>>> fmri = sns.load_dataset("fmri")
>>> g = sns.relplot(x="timepoint", y="signal",
... hue="event", style="event", col="region",
... kind="line", data=fmri)
Change the size of each facet:
.. plot::
:context: close-figs
>>> g = sns.relplot(x="timepoint", y="signal",
... hue="event", style="event", col="region",
... height=5, aspect=.7, kind="line", data=fmri)
""").format(**_relational_docs)
|
the-stack_106_30145 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import torch
import pytorch_lightning as pl
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
try:
from torchvision.datasets.mnist import MNIST
from torchvision import transforms
except Exception as e:
from tests.base.datasets import MNIST
class Backbone(torch.nn.Module):
def __init__(self, hidden_dim=128):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, hidden_dim)
self.l2 = torch.nn.Linear(hidden_dim, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = torch.relu(self.l1(x))
x = torch.relu(self.l2(x))
return x
class LitClassifier(pl.LightningModule):
def __init__(self, backbone, learning_rate=1e-3):
super().__init__()
self.save_hyperparameters()
self.backbone = backbone
def forward(self, x):
# use forward for inference/predictions
embedding = self.backbone(x)
return embedding
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.backbone(x)
loss = F.cross_entropy(y_hat, y)
self.log('train_loss', loss, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self.backbone(x)
loss = F.cross_entropy(y_hat, y)
self.log('valid_loss', loss, on_step=True)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.backbone(x)
loss = F.cross_entropy(y_hat, y)
self.log('test_loss', loss)
def configure_optimizers(self):
# self.hparams available because we called self.save_hyperparameters()
return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--learning_rate', type=float, default=0.0001)
return parser
def cli_main():
pl.seed_everything(1234)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--hidden_dim', type=int, default=128)
parser = pl.Trainer.add_argparse_args(parser)
parser = LitClassifier.add_model_specific_args(parser)
args = parser.parse_args()
# ------------
# data
# ------------
dataset = MNIST('', train=True, download=True, transform=transforms.ToTensor())
mnist_test = MNIST('', train=False, download=True, transform=transforms.ToTensor())
mnist_train, mnist_val = random_split(dataset, [55000, 5000])
train_loader = DataLoader(mnist_train, batch_size=args.batch_size)
val_loader = DataLoader(mnist_val, batch_size=args.batch_size)
test_loader = DataLoader(mnist_test, batch_size=args.batch_size)
# ------------
# model
# ------------
model = LitClassifier(Backbone(hidden_dim=args.hidden_dim), args.learning_rate)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, train_loader, val_loader)
# ------------
# testing
# ------------
trainer.test(test_dataloaders=test_loader)
if __name__ == '__main__':
cli_main()
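# Illustrative invocation (a sketch, not part of the original example): the
# --batch_size/--hidden_dim/--learning_rate flags come from the parsers built
# in cli_main(), and Trainer flags such as --max_epochs are added by
# pl.Trainer.add_argparse_args; the script name and values are assumptions.
#
#     python <this_file>.py --batch_size 64 --hidden_dim 256 --max_epochs 5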
|
the-stack_106_30147 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from functools import partial
try:
import PySide2
except ImportError:
from PySide import QtCore
from PySide import QtGui
import PySide.QtGui as QtWidgets
else:
from PySide2 import QtCore
from PySide2 import QtGui
from PySide2 import QtWidgets
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import compas
from compas.datastructures import Mesh
from compas.datastructures import mesh_flip_cycles
from compas.datastructures import mesh_subdivide
from compas.geometry import centroid_points
from compas.utilities import hex_to_rgb
from compas.utilities import flatten
from compas.viewers import core
from compas.viewers.meshviewer.model import MeshView
__all__ = ['Controller']
get_obj_file = partial(
QtWidgets.QFileDialog.getOpenFileName,
caption='Select OBJ file',
dir=compas.DATA,
filter='OBJ files (*.obj)'
)
get_stl_file = partial(
QtWidgets.QFileDialog.getOpenFileName,
caption='Select STL file',
dir=compas.DATA,
filter='STL files (*.stl)'
)
get_json_file = partial(
QtWidgets.QFileDialog.getOpenFileName,
caption='Select JSON file',
dir=compas.DATA,
filter='JSON files (*.json)'
)
get_ply_file = partial(
QtWidgets.QFileDialog.getOpenFileName,
caption='Select PLY file',
dir=compas.DATA,
filter='PLY files (*.ply)'
)
hex_to_rgb = partial(hex_to_rgb, normalize=True)
def flist(items):
return list(flatten(items))
class Controller(core.controller.Controller):
settings = core.controller.Controller.settings or {}
settings['vertices.size:value'] = 1.0
settings['vertices.size:minval'] = 1
settings['vertices.size:maxval'] = 100
settings['vertices.size:step'] = 1
settings['vertices.size:scale'] = 0.1
settings['edges.width:value'] = 1.0
settings['edges.width:minval'] = 1
settings['edges.width:maxval'] = 100
settings['edges.width:step'] = 1
settings['edges.width:scale'] = 0.1
settings['normals.scale:value'] = 1.0
settings['normals.scale:minval'] = 1
settings['normals.scale:maxval'] = 100
settings['normals.scale:step'] = 1
settings['normals.scale:scale'] = 0.1
settings['vertices.color'] = '#0092d2'
settings['edges.color'] = '#666666'
settings['faces.color:front'] = '#cccccc'
settings['faces.color:back'] = '#ff5e99'
settings['normals.color'] = '#0092d2'
settings['vertices.on'] = True
settings['edges.on'] = True
settings['faces.on'] = True
settings['normals.on'] = False
settings['vertices.labels.on'] = False
settings['edges.labels.on'] = False
settings['faces.labels.on'] = False
settings['camera.elevation:value'] = -10
settings['camera.elevation:minval'] = -180
settings['camera.elevation:maxval'] = 0
settings['camera.elevation:step'] = +1
settings['camera.elevation:scale'] = +1
settings['camera.azimuth:value'] = +30
settings['camera.azimuth:minval'] = -180
settings['camera.azimuth:maxval'] = +180
settings['camera.azimuth:step'] = +1
settings['camera.azimuth:scale'] = +1
settings['camera.distance:value'] = +10
settings['camera.distance:minval'] = 0
settings['camera.distance:maxval'] = +100
settings['camera.distance:step'] = +1
settings['camera.distance:scale'] = +1
settings['camera.distance:delta'] = +0.05
settings['camera.rotation:delta'] = +0.5
settings['camera.fov:value'] = 50
settings['camera.near:value'] = 0.1
settings['camera.far:value'] = 1000
def __init__(self, app):
super(Controller, self).__init__(app)
self._mesh = None
self._meshview = None
@property
def view(self):
return self.app.view
@property
def mesh(self):
return self._mesh
@property
def meshview(self):
return self._meshview
@mesh.setter
def mesh(self, mesh):
self._mesh = mesh
self._meshview = MeshView(mesh)
def center_mesh(self):
xyz = [self.mesh.vertex_coordinates(key) for key in self.mesh.vertices()]
cx, cy, cz = centroid_points(xyz)
for key, attr in self.mesh.vertices(True):
attr['x'] -= cx
attr['y'] -= cy
attr['z'] -= cz
def adjust_camera(self):
pass
# ==========================================================================
# constructors
# ==========================================================================
def from_obj(self):
filename, _ = get_obj_file()
if filename:
self.mesh = Mesh.from_obj(filename)
# self.center_mesh()
self.view.make_buffers()
self.view.updateGL()
def to_obj(self):
self.message('Export to OBJ is under construction...')
def from_json(self):
filename, _ = get_json_file()
if filename:
self.mesh = Mesh.from_json(filename)
self.view.make_buffers()
self.view.updateGL()
def to_json(self):
self.message('Export to JSON is under construction...')
def from_stl(self):
filename, _ = get_stl_file()
if filename:
self.mesh = Mesh.from_stl(filename)
self.view.make_buffers()
self.view.updateGL()
def to_stl(self):
self.message('Export to STL is under construction...')
def from_ply(self):
filename, _ = get_ply_file()
if filename:
self.mesh = Mesh.from_ply(filename)
self.view.make_buffers()
self.view.updateGL()
def to_ply(self):
        self.message('Export to PLY is under construction...')
def from_polyhedron(self, f):
self.mesh = Mesh.from_polyhedron(f)
self.view.make_buffers()
self.view.updateGL()
# ==========================================================================
# view
# ==========================================================================
def zoom_extents(self):
self.message('Zoom Extents is under construction...')
def zoom_in(self):
self.view.camera.zoom_in()
self.view.updateGL()
def zoom_out(self):
self.view.camera.zoom_out()
self.view.updateGL()
def set_view(self, view):
self.view.current = view
self.view.updateGL()
def update_camera_settings(self):
self.log('Updating the camera settings.')
def capture_image(self):
self.message('Capture Image is under construction...')
def capture_video(self):
self.message('Capture Video is under construction...')
# ==========================================================================
# appearance
# ==========================================================================
def slide_size_vertices(self, value):
self.settings['vertices.size:value'] = value
self.view.updateGL()
def edit_size_vertices(self, value):
self.settings['vertices.size:value'] = value
self.view.updateGL()
def slide_width_edges(self, value):
self.settings['edges.width:value'] = value
self.view.updateGL()
def edit_width_edges(self, value):
self.settings['edges.width:value'] = value
self.view.updateGL()
def slide_scale_normals(self, value):
self.settings['normals.scale:value'] = value
self.view.updateGL()
def edit_scale_normals(self, value):
self.settings['normals.scale:value'] = value
self.view.updateGL()
# ==========================================================================
# visibility
# ==========================================================================
def toggle_faces(self, state):
self.settings['faces.on'] = state == QtCore.Qt.Checked
self.view.updateGL()
def toggle_edges(self, state):
self.settings['edges.on'] = state == QtCore.Qt.Checked
self.view.updateGL()
def toggle_vertices(self, state):
self.settings['vertices.on'] = state == QtCore.Qt.Checked
self.view.updateGL()
def toggle_normals(self, state):
self.message('Display of face and vertex normals is still under construction...')
self.settings['normals.on'] = state == QtCore.Qt.Checked
self.view.updateGL()
# ==========================================================================
# color
# ==========================================================================
def change_vertices_color(self, color):
self.settings['vertices.color'] = color
self.view.update_vertex_buffer('vertices.color', self.view.array_vertices_color)
self.view.updateGL()
self.app.main.activateWindow()
def change_edges_color(self, color):
self.settings['edges.color'] = color
self.view.update_vertex_buffer('edges.color', self.view.array_edges_color)
self.view.updateGL()
self.app.main.activateWindow()
def change_faces_color_front(self, color):
self.settings['faces.color:front'] = color
self.view.update_vertex_buffer('faces.color:front', self.view.array_faces_color_front)
self.view.updateGL()
self.app.main.activateWindow()
def change_faces_color_back(self, color):
self.settings['faces.color:back'] = color
self.view.update_vertex_buffer('faces.color:back', self.view.array_faces_color_back)
self.view.updateGL()
self.app.main.activateWindow()
def change_normals_color(self, color):
self.settings['normals.color'] = color
self.view.update_vertex_buffer('normals.color', self.view.array_normals_color)
self.view.updateGL()
self.app.main.activateWindow()
# ==========================================================================
# camera
# ==========================================================================
def slide_azimuth(self, value):
self.view.camera.rz = float(value)
self.view.updateGL()
def edit_azimuth(self, value):
print(value)
def slide_elevation(self, value):
self.view.camera.rx = float(value)
self.view.updateGL()
def edit_elevation(self, value):
print(value)
def slide_distance(self, value):
self.view.camera.distance = float(value)
self.view.updateGL()
def edit_distance(self, value):
print(value)
def edit_fov(self, value):
self.view.camera.fov = float(value)
self.view.updateGL()
# ==========================================================================
# tools
# ==========================================================================
# open dialog or panel for additional options
# set options and apply
def flip_normals(self):
mesh_flip_cycles(self.mesh)
# this is a bit of a hack
# faces of the viewmesh only get calculated at the time when the mesh
# is assigned to the viewmesh
self.meshview.mesh = self.mesh
self.view.update_index_buffer('faces:front', self.view.array_faces_front)
self.view.update_index_buffer('faces:back', self.view.array_faces_back)
self.view.updateGL()
# implement as toggle?
    # if 'on' the original is shown as control mesh (edges only)
# and the subdivision surface up to the specified level
def subdivide(self, scheme, k):
self.mesh = mesh_subdivide(self.mesh, scheme=scheme, k=k)
self.view.make_buffers()
self.view.updateGL()
def smooth_wo_shrinking(self):
pass
def smooth_area(self):
pass
def smooth_laplacian(self):
pass
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
|
the-stack_106_30148 | import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
def _print_success_message():
print('Tests Passed')
def test_create_lookup_tables(create_lookup_tables):
with tf.Graph().as_default():
test_text = '''
Moe_Szyslak Moe's Tavern Where the elite meet to drink
Bart_Simpson Eh yeah hello is Mike there Last name Rotch
Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately
Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick
Moe_Szyslak Whats the matter Homer You're not your normal effervescent self
Homer_Simpson I got my problems Moe Give me another one
Moe_Szyslak Homer hey you should not drink to forget your problems
Barney_Gumble Yeah you should only drink to enhance your social skills'''
test_text = test_text.lower()
test_text = test_text.split()
vocab_to_int, int_to_vocab = create_lookup_tables(test_text)
# Check types
assert isinstance(vocab_to_int, dict),\
'vocab_to_int is not a dictionary.'
assert isinstance(int_to_vocab, dict),\
'int_to_vocab is not a dictionary.'
# Compare lengths of dicts
assert len(vocab_to_int) == len(int_to_vocab),\
'Length of vocab_to_int and int_to_vocab don\'t match. ' \
'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))
# Make sure the dicts have the same words
vocab_to_int_word_set = set(vocab_to_int.keys())
int_to_vocab_word_set = set(int_to_vocab.values())
assert not (vocab_to_int_word_set - int_to_vocab_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)
assert not (int_to_vocab_word_set - vocab_to_int_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)
# Make sure the dicts have the same word ids
vocab_to_int_word_id_set = set(vocab_to_int.values())
int_to_vocab_word_id_set = set(int_to_vocab.keys())
assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)
assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)
# Make sure the dicts make the same lookup
        mismatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]
        assert not mismatches,\
            'Found {} mismatch(es). First mismatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(
                len(mismatches),
                *mismatches[0])
assert len(vocab_to_int) > len(set(test_text))/2,\
'The length of vocab seems too small. Found a length of {}'.format(len(vocab_to_int))
_print_success_message()
def test_get_batches(get_batches):
with tf.Graph().as_default():
test_batch_size = 128
test_seq_length = 5
test_int_text = list(range(1000*test_seq_length))
batches = get_batches(test_int_text, test_batch_size, test_seq_length)
# Check type
assert isinstance(batches, np.ndarray),\
'Batches is not a Numpy array'
# Check shape
assert batches.shape == (7, 2, 128, 5),\
'Batches returned wrong shape. Found {}'.format(batches.shape)
_print_success_message()
def test_tokenize(token_lookup):
with tf.Graph().as_default():
symbols = set(['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n'])
token_dict = token_lookup()
# Check type
assert isinstance(token_dict, dict), \
'Returned type is {}.'.format(type(token_dict))
# Check symbols
missing_symbols = symbols - set(token_dict.keys())
unknown_symbols = set(token_dict.keys()) - symbols
assert not missing_symbols, \
'Missing symbols: {}'.format(missing_symbols)
assert not unknown_symbols, \
'Unknown symbols: {}'.format(unknown_symbols)
# Check values type
bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]
assert not bad_value_type,\
'Found token as {} type.'.format(bad_value_type[0])
# Check for spaces
key_has_spaces = [k for k in token_dict.keys() if ' ' in k]
val_has_spaces = [val for val in token_dict.values() if ' ' in val]
assert not key_has_spaces,\
'The key "{}" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])
assert not val_has_spaces,\
'The value "{}" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])
# Check for symbols in values
symbol_val = ()
for symbol in symbols:
for val in token_dict.values():
if symbol in val:
symbol_val = (symbol, val)
assert not symbol_val,\
'Don\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)
_print_success_message()
def test_get_inputs(get_inputs):
with tf.Graph().as_default():
input_data, targets, lr = get_inputs()
# Check type
assert input_data.op.type == 'Placeholder',\
'Input not a Placeholder.'
assert targets.op.type == 'Placeholder',\
'Targets not a Placeholder.'
assert lr.op.type == 'Placeholder',\
'Learning Rate not a Placeholder.'
# Check name
assert input_data.name == 'input:0',\
'Input has bad name. Found name {}'.format(input_data.name)
# Check rank
input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape())
targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape())
lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape())
assert input_rank == 2,\
'Input has wrong rank. Rank {} found.'.format(input_rank)
assert targets_rank == 2,\
'Targets has wrong rank. Rank {} found.'.format(targets_rank)
assert lr_rank == 0,\
'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)
_print_success_message()
def test_get_init_cell(get_init_cell):
with tf.Graph().as_default():
test_batch_size_ph = tf.placeholder(tf.int32)
test_rnn_size = 256
cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)
# Check type
assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\
'Cell is wrong type. Found {} type'.format(type(cell))
# Check for name attribute
assert hasattr(init_state, 'name'),\
'Initial state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
# Check name
assert init_state.name == 'initial_state:0',\
'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name)
_print_success_message()
def test_get_embed(get_embed):
with tf.Graph().as_default():
embed_shape = [50, 5, 256]
test_input_data = tf.placeholder(tf.int32, embed_shape[:2])
test_vocab_size = 27
test_embed_dim = embed_shape[2]
embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)
# Check shape
assert embed.shape == embed_shape,\
'Wrong shape. Found shape {}'.format(embed.shape)
_print_success_message()
def test_build_rnn(build_rnn):
with tf.Graph().as_default():
test_rnn_size = 256
test_rnn_layer_size = 2
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size)] * test_rnn_layer_size)
test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])
outputs, final_state = build_rnn(test_cell, test_inputs)
# Check name
assert hasattr(final_state, 'name'),\
'Final state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
assert final_state.name == 'final_state:0',\
'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name)
# Check shape
assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\
'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_build_nn(build_nn):
with tf.Graph().as_default():
test_input_data_shape = [128, 5]
test_input_data = tf.placeholder(tf.int32, test_input_data_shape)
test_rnn_size = 256
test_embed_dim = 300
test_rnn_layer_size = 2
test_vocab_size = 27
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size)] * test_rnn_layer_size)
# test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size, test_embed_dim)
# Check name
assert hasattr(final_state, 'name'), \
'Final state doesn\'t have the "name" attribute. Are you using build_rnn?'
assert final_state.name == 'final_state:0', \
'Final state doesn\'t have the correct name. Found the name {}. Are you using build_rnn?'.format(final_state.name)
# Check Shape
assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \
'Outputs has wrong shape. Found shape {}'.format(logits.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_get_tensors(get_tensors):
test_graph = tf.Graph()
with test_graph.as_default():
test_input = tf.placeholder(tf.int32, name='input')
test_initial_state = tf.placeholder(tf.int32, name='initial_state')
test_final_state = tf.placeholder(tf.int32, name='final_state')
test_probs = tf.placeholder(tf.float32, name='probs')
input_text, initial_state, final_state, probs = get_tensors(test_graph)
# Check correct tensor
assert input_text == test_input,\
'Test input is wrong tensor'
assert initial_state == test_initial_state, \
'Initial state is wrong tensor'
assert final_state == test_final_state, \
'Final state is wrong tensor'
assert probs == test_probs, \
'Probabilities is wrong tensor'
_print_success_message()
def test_pick_word(pick_word):
with tf.Graph().as_default():
test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])
test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}
pred_word = pick_word(test_probabilities, test_int_to_vocab)
# Check type
assert isinstance(pred_word, str),\
'Predicted word is wrong type. Found {} type.'.format(type(pred_word))
# Check word is from vocab
assert pred_word in test_int_to_vocab.values(),\
'Predicted word not found in int_to_vocab.'
        _print_success_message()
|
the-stack_106_30149 | # ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
from typing import List, Optional
import glob
def make_video_table(
data_dir: str,
youtube_ids: List[str],
ground_truth_labels: List[str],
predictions: Optional[List[str]] = None,
) -> str:
"""
Make an HTML table where each cell contains a video and metadata.
    Inputs:
        data_dir: path to the directory holding the Kinetics video files
        youtube_ids: list of strings of YouTube ids, one for each video to display;
            these videos should be part of the Kinetics dataset
        ground_truth_labels: list of strings of ground truth labels, one for each video
        predictions: [optional] list of strings of model predictions, one for each video
    Outputs:
        video_html: a string of HTML that builds a table; to be displayed with IPython.display.HTML
    Example:
        from IPython.display import HTML
        HTML(make_video_table(DATA_DIR, YOUTUBE_IDS, TRUE_LABELS_STR))
"""
VIDEOS_PER_ROW = 4
NO_ROWS = len(youtube_ids) // VIDEOS_PER_ROW + 1
WIDTH = 210
HEIGHT = WIDTH * 2 // 3
# for videos to properly display, data directory must be relative to notebook dir
try:
data_dir = data_dir[data_dir.find("data"):]
except:
pass
filepaths = []
for youtube_id in youtube_ids:
filepaths.append(glob.glob(f"{data_dir}/*/{youtube_id}_*.mp4")[0])
# make html video table
video_html = ["<table><tr>"]
i = 0
while i < len(filepaths):
prediction_par = ""
if predictions is not None:
color = "black" if predictions[i] == ground_truth_labels[i] else "red"
prediction_par = f"<p style='color:{color};'>{predictions[i]}</p>"
video_html.append(
f"""
<td><h2>{i}</h2><p>{ground_truth_labels[i]}</p><video width="{WIDTH}" height="{HEIGHT}" controls>
<source src="{filepaths[i]}" type="video/mp4">
</video>{prediction_par}</td>"""
)
i += 1
if i % VIDEOS_PER_ROW == 0:
video_html.append("</tr><tr>")
video_html.append("</tr></table>")
return "".join(video_html)
|
the-stack_106_30150 | #!/usr/bin/env python
# Copyright (c) 2014-2018 Michael Hirsch, Ph.D.
"""
converts right ascension, declination to azimuth, elevation and vice versa.
Normally do this via AstroPy.
These functions are fallbacks for those without AstroPy.
Michael Hirsch implementation of algorithms from D. Vallado
"""
from datetime import datetime
from numpy import sin, cos, degrees, radians, arcsin, arctan2, atleast_1d
from typing import Tuple
from .sidereal import datetime2sidereal
__all__ = ['azel2radec', 'radec2azel']
def azel2radec(az_deg: float, el_deg: float,
lat_deg: float, lon_deg: float,
time: datetime) -> Tuple[float, float]:
"""
converts azimuth, elevation to right ascension, declination
Parameters
----------
az_deg : float or numpy.ndarray of float
azimuth (clockwise) to point [degrees]
el_deg : float or numpy.ndarray of float
elevation above horizon to point [degrees]
lat_deg : float
observer WGS84 latitude [degrees]
lon_deg : float
observer WGS84 longitude [degrees]
time : datetime.datetime
time of observation
Results
-------
ra_deg : float or numpy.ndarray of float
right ascension to target [degrees]
dec_deg : float or numpy.ndarray of float
declination of target [degrees]
from D.Vallado Fundamentals of Astrodynamics and Applications
p.258-259
"""
az = atleast_1d(az_deg)
el = atleast_1d(el_deg)
lat = atleast_1d(lat_deg)
lon = atleast_1d(lon_deg)
if az.shape != el.shape:
raise ValueError('az and el must be same shape ndarray')
if not(lat.size == 1 and lon.size == 1):
raise ValueError('need one observer and one or more (az,el).')
if ((lat < -90) | (lat > 90)).any():
raise ValueError('-90 <= lat <= 90')
az = radians(az)
el = radians(el)
lat = radians(lat)
lon = radians(lon)
# %% Vallado "algorithm 28" p 268
dec = arcsin(sin(el) * sin(lat) + cos(el) * cos(lat) * cos(az))
lha = arctan2(-(sin(az) * cos(el)) / cos(dec),
(sin(el) - sin(lat) * sin(dec)) / (cos(dec) * cos(lat)))
lst = datetime2sidereal(time, lon) # lon, ra in RADIANS
""" by definition right ascension [0, 360) degrees """
return degrees(lst - lha) % 360, degrees(dec)
def radec2azel(ra_deg: float, dec_deg: float,
lat_deg: float, lon_deg: float,
time: datetime) -> Tuple[float, float]:
"""
converts right ascension, declination to azimuth, elevation
Parameters
----------
ra_deg : float or numpy.ndarray of float
right ascension to target [degrees]
dec_deg : float or numpy.ndarray of float
declination to target [degrees]
lat_deg : float
observer WGS84 latitude [degrees]
lon_deg : float
observer WGS84 longitude [degrees]
time : datetime.datetime
time of observation
Results
-------
az_deg : float or numpy.ndarray of float
azimuth clockwise from north to point [degrees]
el_deg : float or numpy.ndarray of float
elevation above horizon to point [degrees]
from D. Vallado "Fundamentals of Astrodynamics and Applications "
4th Edition Ch. 4.4 pg. 266-268
"""
ra = atleast_1d(ra_deg)
dec = atleast_1d(dec_deg)
lat = atleast_1d(lat_deg)
lon = atleast_1d(lon_deg)
if ra.shape != dec.shape:
        raise ValueError('ra and dec must be same shape ndarray')
if not(lat.size == 1 and lon.size == 1):
raise ValueError('need one observer and one or more (az,el).')
if ((lat < -90) | (lat > 90)).any():
raise ValueError('-90 <= lat <= 90')
ra = radians(ra)
dec = radians(dec)
lat = radians(lat)
lon = radians(lon)
lst = datetime2sidereal(time, lon) # RADIANS
# %% Eq. 4-11 p. 267 LOCAL HOUR ANGLE
lha = lst - ra
# %% #Eq. 4-12 p. 267
el = arcsin(sin(lat) * sin(dec) + cos(lat) * cos(dec) * cos(lha))
# %% combine Eq. 4-13 and 4-14 p. 268
az = arctan2(-sin(lha) * cos(dec) / cos(el),
(sin(dec) - sin(el) * sin(lat)) / (cos(el) * cos(lat)))
return degrees(az) % 360.0, degrees(el)
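# The helper below is an illustrative sketch added for this write-up; it is
# not part of the upstream module, and the observer location and time are
# arbitrary assumptions.  It simply round-trips the two conversions above.
def _roundtrip_demo():
    """Convert an assumed az/el to ra/dec and back for one observer and time."""
    t = datetime(2014, 4, 6, 8, 0, 0)
    lat, lon = 65.1, -147.5            # arbitrary observer coordinates, degrees
    ra, dec = azel2radec(180.1, 80.5, lat, lon, t)
    az, el = radec2azel(ra, dec, lat, lon, t)
    return az, el                      # should be close to (180.1, 80.5)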
|
the-stack_106_30152 | #!/usr/bin/env python
"""
This is a test of the API Validator component, it's multimatch feature in
particular.
The treatment of each configured validator by the filter can be broken down
into the following hierarchy:
-> Not considered (N)
-> Considered (C) -> Skipped (S)
-> Considered (C) -> Tried (T) -> Passed (P)
-> Considered (C) -> Tried (T) -> Failed (F, F4, or F5)
There are two kinds of failures:
if the request is for a resource that isn't present in the wadl, a 404 will
be returned (F4)
    if the request uses a method that is not allowed for the specified resource,
a 405 will be returned (F5)
If none of the roles match, then the response code will be 403. As a result,
this is denoted as 'F3', although no validator can be configured to return a
403.
If @multi-role-match is false and none of the roles match (all S), then the
default validator will be tried afterwards.
If @multi-role-match is true, then the default will be tried before any other
validators.
We define some notation:
Notation for validator configuration
Sequence of symbols representing the validators in order, and what they
would result in if tried. If multi-match is enabled, then the sequence is
preceded by 'M'
F4F4PF5F5
MF4PF5P
Notation for test
Validator configuration notation, followed by '\' and a number (or numbers)
indicating which validators will be tried, followed by '->' and the
expected result. Expected result is one of (P, F3, F4, F5).
F4F4PF5F5\3 -> P
P\0 -> F3
Notation for test with default
Same as above, except it begins with a validator configuration with
parentheses '(' and ')' around the symbol for the default validator. This
is followed by an equals sign '=' and the equivalent test if we hadn't been
using any default.
F4(F5)\1 = F4F5F5\1,3 -> F4
MF5(P)F4\3 = MPF5PF4\1,4 -> P
Notation for effective pattern
A sequence of 'P', 'F', 'S', or 'N', each indicating how the filter should
treat the validator in that position. If multi-match, preceded by 'M'.
SSPNN
MF4SSP
The test cases below are intended to cover all of the required behaviors.
Obviously, we can't comprehensively test the set of all possible configurations
of the filter, so we select a few which cover the required functionality. We
model the treatment of validators (see effective pattern above) as a state
machine. We then list the transitions between states that align with the
desired behavior. Here, 'O' represents the start of the list and 'X' the end.
Single-match
------------
State transition table
| P F S N X
-------------
O | Y Y Y N ?
P | N N N Y Y
F | N N N Y Y
S | Y Y Y N Y
N | N N N Y Y
(The '?' denotes the case where the start is immediately followed by the
end of the list. That is, no validators are defined in the configuration.
The functional specification does not cover this case, so we do not test
it.)
From this, we determine that the valid transitions are:
OP, OF, OS,
PN, PX,
FN, FX,
SP, SF, SS, SX,
NN, NX
The following sequences cover all of the above transitions:
SSPNN OS, SS, SP, PN, NN, NX
P OP, PX
F OF, FX
S OS, SX
SFN OS, SF, FN, NX
Multi-match
-----------
State transition table
| P F S N X
-------------
O | Y Y Y N ?
P | N N N Y Y
F | Y Y Y N Y
S | Y Y Y N Y
N | N N N Y Y
Valid transitions:
OP, OF, OS,
PN, PX,
FP, FF, FS, FX,
SP, SF, SS, SX,
NN, NX
Covering sequences:
MSSFSFFPNN OS, SS, SF, FS, FF, FP, PN, NN, NX
MP OP, PX
MF OF, FX
MS OS, SX
MSP OS, SP, PX
Test Cases
----------
config'd pattern effective pattern exp. result
single-match:
F4F4PF5F5\3 SSPNN P
P\0 S F3
P\1 P P
F4\1 F4 F4
PF4F5\2 SF4N F4
single-match with default:
F4(F5)\1 = F4F5F5\1,3 F4NN F4
F4(F5)\0 = F4F5F5\3 SSF5 F5
multi-match:
MF4F4F5F4F5F5PF4F4\3,5,6,7 MSSF5SF5F5PNN P
MF4F4F5F4F5F5PF4F4\3,5,6 MSSF5SF5F5SSS F5
MP\0 MS F3
MP\1 MP P
MF4\1 MF4 F4
MF4P\2 MSP P
multi-match with default:
MF5(P)F4\3 = MPF5PF4\1,4 MPNNN P
MF5(F4)P\3 = MF4F5F4P\1,4 MF4SSP P
MP(F4)F5\3 = MF4PF4F5\1,4 MF4SSF5 F5
MP(F4)P\0 = MF4PF4P\1 MF4SSS F4
Future, outside-the-box considerations
--------------------------------------
roles
are leading and trailing spaces trimmed?
can tabs work as well as spaces?
are leading and trailing tabs trimmed?
what about other unicode whitespace?
qvalue
make sure not specifying q actually translates to the default of 1
what happens if q is < 0 or > 1?
what happens if q is not a number?
"""
import unittest2 as unittest
import xmlrunner
import logging
import time
import argparse
import os
import deproxy
import itertools
import narwhal
from narwhal import valve
from narwhal import conf
from narwhal import pathutil
from narwhal import get_next_open_port
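# --- Editorial sketch (assumption: not used by the test suite itself). --------
# A quick way to sanity-check the "covering sequences" claim in the module
# docstring: expand each sequence with the O/X start/end markers and collect
# the adjacent-pair transitions it exercises.
def _transitions(sequences):
    seen = set()
    for seq in sequences:
        states = ['O'] + list(seq.lstrip('M')) + ['X']
        seen.update(a + b for a, b in zip(states, states[1:]))
    return seen
# Example: _transitions(['SSPNN', 'P', 'F', 'S', 'SFN']) yields exactly the 13
# single-match transitions listed above (OP, OF, OS, PN, PX, FN, FX, SP, SF,
# SS, SX, NN, NX).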
logger = logging.getLogger(__name__)
config_dir = pathutil.join(os.getcwd(), 'etc/repose')
deployment_dir = pathutil.join(os.getcwd(), 'var/repose')
artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters')
log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log')
d = None
deproxy_port = get_next_open_port()
def setUpModule():
# Set up folder hierarchy
logger.debug('setUpModule')
pathutil.create_folder(deployment_dir)
pathutil.create_folder(os.path.dirname(log_file))
global d
if d is None:
d = deproxy.Deproxy()
d.add_endpoint(('localhost', deproxy_port))
def tearDownModule():
logger.debug('')
if d is not None:
logger.debug('shutting down deproxy')
d.shutdown_all_endpoints()
logger.debug('deproxy shut down')
def apply_configs(folder, repose_port):
params = {
'target_hostname': 'localhost',
'target_port': deproxy_port,
'port': repose_port,
'repose_port': repose_port,
}
conf.process_folder_contents(folder=folder, dest_path='etc/repose',
params=params)
def start_repose(repose_port):
return valve.Valve(config_dir='etc/repose', stop_port=get_next_open_port(),
wait_on_start=True, port=repose_port)
def configure_and_start_repose(folder, repose_port):
# set the common config files, like system model and container
apply_configs(folder='configs/common', repose_port=repose_port)
# set the pattern-specific config files, i.e. validator.cfg.xml
apply_configs(folder=folder, repose_port=repose_port)
return start_repose(repose_port=repose_port)
class TestSspnn(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/f4f4pf5f5',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_sspnn(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-3'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
# the following test_* methods check how it responds to multiple roles in
# different orders
def test_pass_first_of_two(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-3,role-4'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
def test_pass_second_of_two(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-4,role-3'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
def test_fail_first_of_two(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-2,role-3'})
self.assertEqual(mc.received_response.code, '404')
self.assertEqual(len(mc.handlings), 0)
def test_fail_second_of_two(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-3,role-2'})
self.assertEqual(mc.received_response.code, '404')
self.assertEqual(len(mc.handlings), 0)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestPAndS(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/p',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_s(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-0'})
self.assertEqual(mc.received_response.code, '403')
self.assertEqual(len(mc.handlings), 0)
def test_p(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-1'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestF(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/f4',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_f(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-1'})
self.assertEqual(mc.received_response.code, '404')
self.assertEqual(len(mc.handlings), 0)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestSfn(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/pf4f5',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_sfn(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-2'})
self.assertEqual(mc.received_response.code, '404')
self.assertEqual(len(mc.handlings), 0)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestSingleMatchDefaults(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/s-default',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_normal(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-1'})
self.assertEqual(mc.received_response.code, '404')
self.assertEqual(len(mc.handlings), 0)
def test_activate_default(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-0'})
self.assertEqual(mc.received_response.code, '405')
self.assertEqual(len(mc.handlings), 0)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestMssfsffpnn(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
folder = 'configs/mf4f4f5f4f5f5pf4f4'
cls.valve = configure_and_start_repose(folder=folder,
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_mssfsffpnn(self):
mc = d.make_request(url=self.url, headers={'X-Roles':
'role-3,role-5,role-6,role-7'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
def test_mssfsffsss(self):
mc = d.make_request(url=self.url, headers={'X-Roles':
'role-3,role-5,role-6'})
self.assertEqual(mc.received_response.code, '405')
self.assertEqual(len(mc.handlings), 0)
def test_msssssspnn(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-7,role-8'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
def test_mssfssspnn_order(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-7,role-3'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestMpAndMs(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/mp',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_s(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-0'})
self.assertEqual(mc.received_response.code, '403')
self.assertEqual(len(mc.handlings), 0)
def test_p(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-1'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestMf(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/mf4',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_f(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-1'})
self.assertEqual(mc.received_response.code, '404')
self.assertEqual(len(mc.handlings), 0)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestMsp(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/mf4p',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_msp(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-2'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestMultimatchMatchDefaults1(unittest.TestCase):
"""This TestCase checks that the default runs after skips and failures."""
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/m-default-1',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_ssf_default_p(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-3'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestMultimatchMatchDefaults2(unittest.TestCase):
"""This TestCase checks that the default doesn't overwrite a pass."""
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/m-default-2',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_ssp_default_f(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-3'})
self.assertEqual(mc.received_response.code, '200')
self.assertEqual(len(mc.handlings), 1)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestMultimatchMatchDefaults3(unittest.TestCase):
"""This TestCase checks that the default is tried before anything else."""
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/m-default-3',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_ssf_default_f(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-3'})
self.assertEqual(mc.received_response.code, '405')
self.assertEqual(len(mc.handlings), 0)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
class TestMultimatchMatchDefaults4(unittest.TestCase):
"""This TestCase checks that the default runs if none of the roles
matched.
"""
@classmethod
def setUpClass(cls):
logger.debug('')
repose_port = get_next_open_port()
cls.valve = configure_and_start_repose(folder='configs/m-default-4',
repose_port=repose_port)
cls.url = 'http://localhost:%i/resource' % repose_port
def test_sss_default_f(self):
mc = d.make_request(url=self.url, headers={'X-Roles': 'role-0'})
self.assertEqual(mc.received_response.code, '404')
self.assertEqual(len(mc.handlings), 0)
@classmethod
def tearDownClass(cls):
logger.debug('stopping repose')
cls.valve.stop()
logger.debug('repose stopped')
def run():
parser = argparse.ArgumentParser()
parser.add_argument('--print-log', action='store_true',
help='Print the log.')
parser.add_argument('--port-base', help='The port number to start looking '
'for open ports. The default is %i.' % narwhal.port_base,
default=narwhal.port_base, type=int)
args = parser.parse_args()
if args.print_log:
logging.basicConfig(level=logging.DEBUG,
format=('%(asctime)s %(levelname)s:%(name)s:'
'%(funcName)s:'
'%(filename)s(%(lineno)d):'
'%(threadName)s(%(thread)d):%(message)s'))
narwhal.port_base = args.port_base
test_runner = xmlrunner.XMLTestRunner(output='test-reports')
try:
setUpModule()
unittest.main(argv=[''], testRunner=test_runner)
finally:
tearDownModule()
if __name__ == '__main__':
run()
|
the-stack_106_30153 | from enum import Enum
import logging
from dialog_api import media_and_files_pb2
from dialog_bot_sdk.entities.Peer import Peer, PeerType
from dialog_bot_sdk.entities.UUID import UUID
from pymongo import MongoClient
from config import *
import requests
from datetime import datetime, timedelta
import re
from time import sleep
import pytz
import base64
from bot import *
logger = logging.getLogger('remindbot')
logger.setLevel(logging.DEBUG)
ch = logging.FileHandler(LOGS_FILE)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class States(Enum):
START = '0'
ENTER_EVENT = '1'
ENTER_TIME = '2'
ENTER_PERIODICITY = '3'
class Tables(Enum):
STATES = 'states'
LAST_EVENT = 'last_event'
EVENTS = 'events'
class RemindStrategy(Strategy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = MongoClient(MONGODBLINK)
self.db = self.client[DBNAME]
self.kill = False
    def check_code(self, ret):
        """Return True (and log the response) if the HTTP status indicates an error."""
        if ret.status_code != 200:
            logging.error('error:' + str(ret.status_code) + str(ret.content))
            return True
        return False
def get_asm_data(self, file=None, task_id=None):
if file:
return {
'apikey': APIKEY, # (str or None)
'model': MODEL, # (str)
'wav': file,
'vad_model': VAD, # (None or str) default: None
'async': True, # (bool) default: False
'nbest': 1, # (int) default in model config: 3
}
else:
return {
'apikey': APIKEY,
'task_id': task_id
}
def text_from_voice(self, uid, file_id, access_hash):
try:
req = media_and_files_pb2.RequestGetFileUrl(
file=media_and_files_pb2.FileLocation(file_id=file_id, access_hash=access_hash))
url = self.bot.internal.media_and_files.GetFileUrl(req).url
doc = requests.get(url)
wav_base64 = base64.b64encode(doc.content).decode()
url = 'http://{}/file'.format(ADDRESS)
data = self.get_asm_data(file=wav_base64)
r = requests.post(url, json=data)
if self.check_code(r):
return ''
task_id = r.json()
url = 'http://{}/task/status'.format(ADDRESS)
data = self.get_asm_data(task_id=task_id)
while True:
r = requests.post(url, json=data)
if self.check_code(r):
return ''
ret = r.json()
if ret['done'] == True:
break
sleep(1)
url = 'http://{}/task/result'.format(ADDRESS)
r = requests.post(url, json=data)
if self.check_code(r):
return ''
ret = r.json()
for res in ret:
speech = res['speec_info']
if 'text' in speech:
text = speech['text']
return text
return ''
except Exception as e:
logger.exception(e)
return ''
def get_value(self, uid, table):
val = self.db[table].find_one({'_id': uid})
if val is None:
return States.START.value
return val['value']
def reset_state(self, uid):
return self.set_value(uid, States.START.value, Tables.STATES.value)
def increment_value(self, uid, value, table):
self.set_value(uid, str(int(value) + 1), table)
def set_value(self, uid, value, table):
self.db[table].replace_one({'_id': uid}, {'value': value}, upsert=True)
return value
def update_value(self, _id, field, value):
self.db.events.update_one({"_id": _id}, {"$set": {field: value}})
def save_event(self, uid, text=None, time=None, periodicity=None):
        '''time: dict with "hours" and "minutes" keys, or None'''
is_completed = (time is not None) and (periodicity is not None)
event = {'uid': uid, 'text': text,'time': None,
'periodicity': periodicity, 'hours': None,
'minutes': None, 'is_completed': is_completed}
if time is not None:
for option in ['hours', 'minutes']:
event[option] = time[option]
if is_completed:
(event['time'], tz) = self.make_event_time(uid, time, periodicity)
self.send_finish_msg(uid, periodicity, text, event['time'], tz)
last_event = self.db.events.insert_one(event).inserted_id
self.set_value(uid, last_event, Tables.LAST_EVENT.value)
def update_event(self, uid, mid, time=None, periodicity=None):
_id = self.get_value(uid, Tables.LAST_EVENT.value)
msg = self.bot.messaging.get_messages_by_id([UUID.from_api(mid)]).wait()[0]
if time is not None:
self.update_value(_id, *time)
if periodicity is not None:
self.update_value(_id, 'periodicity', periodicity)
value = [y for (x, y) in PERIODICITY if x == periodicity][0]
self.bot.messaging.update_message(msg, msg.message.text_message.text + ' \n ' + value)
event = self.db.events.find_one({'_id': _id})
if event['hours'] is not None and event['minutes'] is not None:
if time is not None:
self.bot.messaging.update_message(msg, msg.message.text_message.text + ' \n ' +
'{:02d}:{:02d}'.format(int(event['hours']), int(event['minutes'])))
if event['periodicity'] is not None:
self.on_event_completed(event)
def get_delta_for_periodicity(self, periodicity, event_time):
if periodicity == 'tomorrow' or (
periodicity == 'everyday' and event_time <= datetime.utcnow()):
return timedelta(days=1)
return timedelta(days=0)
def make_event_time(self, uid, time, periodicity):
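        # Combine today's date in the user's timezone with the requested
        # hours/minutes, convert back to UTC, and shift forward a day for
        # 'tomorrow' or for 'everyday' reminders whose time has already passed.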
tz = self.get_tz(uid)
user_day = (datetime.utcnow() + tz).replace(hour=0, minute=0, second=0, microsecond=0)
time = user_day + timedelta(**time) - tz
time += self.get_delta_for_periodicity(periodicity, time)
return (time, tz)
def send_finish_msg(self, uid, periodicity, text, time, tz):
if periodicity == 'everyday':
if time.day != datetime.utcnow().day:
periodicity = 'tomorrow'
else:
periodicity = 'today'
day = [y for (x, y) in PERIODICITY if periodicity == x][0].lower()
time += tz
time = time.strftime("%H:%M")
self.bot.messaging.send_message(Peer(uid, PeerType.PEERTYPE_PRIVATE),
BOT_ANSWERS['FINISH'].format(day, text, time))
def on_event_completed(self, event):
uid = event['uid']
periodicity = event['periodicity']
_id = self.get_value(uid, Tables.LAST_EVENT.value)
event = self.db.events.find_one({'_id': _id})
(time, tz) = self.make_event_time(uid, {key: int(event[key]) for key in ['hours', 'minutes']},
periodicity)
self.update_value(_id, 'time', time)
self.update_value(_id, 'is_completed', True)
self.send_finish_msg(uid, periodicity, event['text'], time, tz)
def send_time_select(self, peer):
self.bot.messaging.send_message(peer, BOT_ANSWERS[States.ENTER_TIME.name],
[InteractiveMediaGroup(
[self.select('Часы',
{str(x) : str(x) for x in range(0, 24)}, 'hours'),
self.select('Минуты',
{str(x) : str(x) for x in range(0, 60)}, 'minutes')])])
def _handle_start(self, peer):
uid = peer.id
state = self.reset_state(uid)
name = self.bot.users.get_user_by_id(uid).wait().data.name
self.bot.messaging.send_message(peer, BOT_ANSWERS['START'].format(name))
self.increment_value(uid, state, Tables.STATES.value)
def get_tz(self, uid):
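        # Resolve the profile timezone (an IANA zone name or a '+HH:MM'-style
        # offset) into a signed timedelta relative to UTC.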
tz = self.bot.users.get_full_profile_by_id(uid).wait().time_zone
if not re.compile(r'^[+-][0-9][0-9][: ][0-6][0-9]$').search(tz):
utc_now = pytz.utc.localize(datetime.utcnow())
tz = utc_now.astimezone(pytz.timezone(tz)).strftime('%z')
if ':' in tz:
tz = tz.replace(':', '')
t = datetime.strptime(tz[1:], "%H%M")
tdelta = timedelta(hours=t.hour, minutes=t.minute)
if tz[0] == '-':
return -tdelta
return tdelta
def find_time(self, text):
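        # Extract an optional time such as 'в 18:30', '18.30' or '18 30' from the
        # text; returns ({'hours': h, 'minutes': m} or None, text with the time removed).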
time_prep = 'в '
res = re.compile(r'({})?([0-1]?[0-9]|2[0-3])[:. ][0-5][0-9]'.format(time_prep)).search(text)
time = None
if res:
time = res.group(0)
text = text.replace(time, '').strip()
if time_prep in time:
time = time.replace(time_prep, '')
if '.' in time:
time = time.replace('.', ':')
time = time.replace(' ', ':')
time = {key: int(value) for (key, value) in zip(['hours', 'minutes'], time.split(':'))}
return (time, text)
def find_periodicity(self, text):
for word in STOP_WORDS:
if text.startswith(word):
text = text.replace(word + ' ', '', 1)
for (period_id, period) in PERIODICITY:
period = period.lower()
idx = text.find(period)
if idx != -1:
periodicity = period_id
if idx == 0:
text = text.replace(period + ' ', '', 1)
elif idx + len(period) == len(text):
text = text[:-len(' ' + period)]
return (text, periodicity)
return (text, None)
def _handle_event(self, peer, text):
periodicity = None
time = None
text = text.lower()
if text[-1] in ['.', '?', '!']:
text = text[:-1]
(time, text) = self.find_time(text)
(text, periodicity) = self.find_periodicity(text)
text = text.strip()
self.save_event(peer.id, text, time, periodicity)
if not time:
self.send_time_select(peer)
if not periodicity:
self.buttons(peer, **BOT_ANSWERS[States.ENTER_PERIODICITY.name])
def on_msg(self, params):
try:
uid = params.peer.id
peer = params.peer
if peer.id != params.sender_peer.id:
return
text = params.message.text_message.text
doc_msg = params.message.document_message
file_id = doc_msg.file_id
access_hash = doc_msg.access_hash
if file_id != 0 and access_hash!=0:
text = self.text_from_voice(uid, file_id, access_hash)
if text == '/start':
self._handle_start(peer)
elif text == '':
self.bot.messaging.send_message(peer, BOT_ANSWERS['ERROR'])
else:
self._handle_event(peer, text)
except Exception as e:
logging.exception(e)
self.kill = True
raise e
def on_click(self, params):
try:
peer = params.peer
value = params.value
uid = peer.id
param_id = params.id
if param_id in ['hours', 'minutes']:
time = self.update_event(uid, params.mid, time=[param_id, value])
if value in [x[0] for x in PERIODICITY]:
self.update_event(uid, params.mid, periodicity=value)
except Exception as e:
logging.exception(e)
self.kill = True
raise e
def strategy(self):
while True:
if self.kill:
return
try:
now = datetime.utcnow()
for x in self.db.events.find({'is_completed': True, 'time': {'$lt': now - timedelta(seconds=1),
'$gt':now - timedelta(minutes=10)}}):
self.bot.messaging.send_message(Peer(x['uid'], PeerType.PEERTYPE_PRIVATE),
BOT_ANSWERS['REMIND'] + x['text'])
if x['periodicity'] == 'everyday':
time = x['time'] + timedelta(days=1)
self.update_value(x['_id'], 'time', time)
else:
self.db.events.remove(x)
            except Exception as e:
                logging.exception(e)
            sleep(1)  # avoid hammering the database in a tight polling loop
if __name__ == '__main__':
while True:
try:
logger.info('Start')
strategy = RemindStrategy(token=BOT_TOKEN,
endpoint=BOT_ENDPOINT,async_=True)
strategy.start()
except Exception as e:
logger.exception(e)
continue
|
the-stack_106_30154 | # coding=utf-8
# Copyright 2020 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet with BatchEnsemble."""
import os
import time
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
import efficientnet_be_model # local file import
import utils # local file import
import uncertainty_metrics as um
# ~312.78 steps per epoch for 4x4 TPU; per_core_batch_size=128; 350 epochs;
# TODO(trandustin): Tune results.
# General model flags
flags.DEFINE_enum('model_name',
default='efficientnet-b0',
enum_values=['efficientnet-b0', 'efficientnet-b1',
'efficientnet-b2', 'efficientnet-b3'],
help='Efficientnet model name.')
flags.DEFINE_integer('ensemble_size', 4, 'Size of ensemble.')
flags.DEFINE_integer('per_core_batch_size', 128, 'Batch size per TPU core/GPU.')
flags.DEFINE_float('random_sign_init', -0.5,
'Use random sign init for fast weights.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_float('base_learning_rate', 0.016,
'Base learning rate when train batch size is 256.')
flags.DEFINE_float('fast_weight_lr_multiplier', 0.5,
'fast weights lr multiplier.')
flags.DEFINE_float('l2', 5e-6, 'L2 coefficient.')
flags.DEFINE_string('data_dir', '', 'Path to training and testing data.')
flags.mark_flag_as_required('data_dir')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
'The directory where the model weights and '
'training/evaluation summaries are stored.')
flags.DEFINE_integer('train_epochs', 350, 'Number of training epochs.')
flags.DEFINE_integer('checkpoint_interval', 15,
'Number of epochs between saving checkpoints. Use -1 to '
'never save checkpoints.')
flags.DEFINE_integer('evaluation_interval', 5, 'How many epochs to run test.')
flags.DEFINE_string('alexnet_errors_path', None,
'Path to AlexNet corruption errors file.')
flags.DEFINE_float('label_smoothing', 0.1, 'label smoothing constant.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_bool('use_bfloat16', True, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 32, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
# Number of images in ImageNet-1k train dataset.
APPROX_IMAGENET_TRAIN_IMAGES = 1281167
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
def main(argv):
del argv # unused arg
tf.io.gfile.makedirs(FLAGS.output_dir)
logging.info('Saving checkpoints at %s', FLAGS.output_dir)
tf.random.set_seed(FLAGS.seed)
per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
batch_size = per_core_batch_size * FLAGS.num_cores
steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
if FLAGS.use_gpu:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
else:
logging.info('Use TPU at %s',
FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
width_coefficient, depth_coefficient, input_image_size, dropout_rate = (
efficientnet_be_model.efficientnet_params(FLAGS.model_name))
builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
use_bfloat16=FLAGS.use_bfloat16,
image_size=input_image_size,
normalize_input=True,
one_hot=True)
train_dataset = builder.as_dataset(
split=tfds.Split.TRAIN,
batch_size=FLAGS.per_core_batch_size * FLAGS.num_cores)
clean_test_dataset = builder.as_dataset(split=tfds.Split.TEST,
batch_size=batch_size)
train_dataset = strategy.experimental_distribute_dataset(train_dataset)
test_datasets = {
'clean': strategy.experimental_distribute_dataset(clean_test_dataset)
}
train_iterator = iter(train_dataset)
test_iterator = iter(test_datasets['clean'])
if FLAGS.use_bfloat16:
policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
tf.keras.mixed_precision.experimental.set_policy(policy)
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.output_dir, 'summaries'))
with strategy.scope():
logging.info('Building %s model', FLAGS.model_name)
model = efficientnet_be_model.Model(
width_coefficient,
depth_coefficient,
dropout_rate,
ensemble_size=FLAGS.ensemble_size,
random_sign_init=FLAGS.random_sign_init)
scaled_lr = FLAGS.base_learning_rate * (batch_size / 256.0)
    # The learning rate decays every 2.4 epochs and warms up over 5 epochs, following the EfficientNet paper.
decay_steps = steps_per_epoch * 2.4
warmup_step = steps_per_epoch * 5
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
scaled_lr, decay_steps, decay_rate=0.97, staircase=True)
learning_rate = utils.WarmupDecaySchedule(lr_schedule, warmup_step)
optimizer = tf.keras.optimizers.RMSprop(
learning_rate, rho=0.9, momentum=0.9, epsilon=0.001)
metrics = {
'train/negative_log_likelihood': tf.keras.metrics.Mean(),
'train/accuracy': tf.keras.metrics.CategoricalAccuracy(),
'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
'train/loss': tf.keras.metrics.Mean(),
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.CategoricalAccuracy(),
'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
}
logging.info('Finished building %s model', FLAGS.model_name)
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
initial_epoch = 0
if latest_checkpoint:
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
def train_step(inputs):
"""Build `step_fn` for efficientnet learning."""
images, labels = inputs
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
labels = tf.tile(labels, [FLAGS.ensemble_size, 1])
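    # BatchEnsemble: tile images and labels ensemble_size times so every member
    # sees each example; the members are differentiated by their fast weights.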
num_replicas = tf.cast(strategy.num_replicas_in_sync, tf.float32)
l2_coeff = tf.cast(FLAGS.l2, tf.float32)
with tf.GradientTape() as tape:
logits = model(images, training=True)
logits = tf.cast(logits, tf.float32)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.categorical_crossentropy(
labels,
logits,
from_logits=True,
label_smoothing=FLAGS.label_smoothing))
filtered_variables = []
for var in model.trainable_variables:
# Apply l2 on the slow weights and bias terms. This excludes BN
# parameters and fast weight approximate posterior/prior parameters,
        # but be careful with their naming scheme.
if 'kernel' in var.name or 'bias' in var.name:
filtered_variables.append(tf.reshape(var, (-1,)))
      # The l2 coefficient (l2_coeff) is applied once when forming the loss below.
      l2_loss = 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
loss = negative_log_likelihood + l2_coeff * l2_loss
scaled_loss = loss / num_replicas
grads = tape.gradient(scaled_loss, model.trainable_weights)
# Separate learning rate implementation.
if FLAGS.fast_weight_lr_multiplier != 1.0:
grads_and_vars = []
for grad, var in zip(grads, model.trainable_variables):
# Apply different learning rate on the fast weights. This excludes BN
          # and slow weights, but be careful with the naming scheme.
if ('batch_norm' not in var.name and 'kernel' not in var.name):
grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier,
var))
else:
grads_and_vars.append((grad, var))
optimizer.apply_gradients(grads_and_vars)
else:
optimizer.apply_gradients(zip(grads, model.trainable_variables))
sparse_labels = tf.cast(
tf.math.argmax(labels, axis=-1, output_type=tf.int32), tf.float32)
probs = tf.nn.softmax(logits)
metrics['train/loss'].update_state(loss)
metrics['train/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['train/accuracy'].update_state(labels, logits)
metrics['train/ece'].update_state(sparse_labels, probs)
step_info = {
'loss/negative_log_likelihood': negative_log_likelihood / num_replicas,
'loss/total_loss': scaled_loss,
}
return step_info
def eval_step(inputs):
"""A single step."""
images, labels = inputs
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
logits = model(images, training=False)
logits = tf.cast(logits, tf.float32)
probs = tf.nn.softmax(logits)
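    # Split the tiled batch back into per-member predictions and average the
    # members' probabilities to form the ensemble prediction.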
per_probs = tf.split(
probs, num_or_size_splits=FLAGS.ensemble_size, axis=0)
probs = tf.reduce_mean(per_probs, axis=0)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.categorical_crossentropy(labels, probs))
sparse_labels = tf.cast(
tf.math.argmax(labels, axis=-1, output_type=tf.int32), tf.float32)
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/ece'].update_state(sparse_labels, probs)
@tf.function
def epoch_fn(should_eval):
"""Build `epoch_fn` for training and potential eval."""
for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
info = strategy.run(train_step, args=(next(train_iterator),))
optim_step = optimizer.iterations
if optim_step % tf.cast(100, optim_step.dtype) == 0:
for k, v in info.items():
v_reduce = strategy.reduce(tf.distribute.ReduceOp.SUM, v, None)
tf.summary.scalar(k, v_reduce, optim_step)
tf.summary.scalar('loss/lr', learning_rate(optim_step), optim_step)
summary_writer.flush()
if should_eval:
for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
strategy.run(eval_step, args=(next(test_iterator),))
# Main training loop.
start_time = time.time()
with summary_writer.as_default():
for epoch in range(initial_epoch, FLAGS.train_epochs):
logging.info('Starting to run epoch: %s', epoch)
should_eval = (epoch % FLAGS.evaluation_interval == 0)
# Pass tf constant to avoid re-tracing.
epoch_fn(tf.constant(should_eval))
current_step = (epoch + 1) * steps_per_epoch
max_steps = steps_per_epoch * FLAGS.train_epochs
time_elapsed = time.time() - start_time
steps_per_sec = float(current_step) / time_elapsed
eta_seconds = (max_steps - current_step) / steps_per_sec
message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
current_step / max_steps,
epoch + 1,
FLAGS.train_epochs,
steps_per_sec,
eta_seconds / 60,
time_elapsed / 60))
logging.info(message)
logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
metrics['train/loss'].result(),
metrics['train/accuracy'].result() * 100)
if should_eval:
logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
metrics['test/negative_log_likelihood'].result(),
metrics['test/accuracy'].result() * 100)
total_metrics = metrics.copy()
total_results = {name: metric.result()
for name, metric in total_metrics.items()}
total_results.update({'lr': learning_rate(optimizer.iterations)})
with summary_writer.as_default():
for name, result in total_results.items():
if should_eval or 'test' not in name:
tf.summary.scalar(name, result, step=epoch + 1)
for metric in metrics.values():
metric.reset_states()
if (FLAGS.checkpoint_interval > 0 and
(epoch + 1) % FLAGS.checkpoint_interval == 0):
checkpoint_name = checkpoint.save(os.path.join(
FLAGS.output_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
if __name__ == '__main__':
app.run(main)
|
the-stack_106_30155 | import os
import sys
from tkinter import *
from tkinter import messagebox
from buttonDict import buttonDict
from operations import *
from colorpallet import *
class Application(Tk):
def __init__(self, master=None):
super().__init__()
        # Calculator theme and color definitions
self.actualTheme = ""
self.setTheme()
self.changeWidgetColor()
        # Window size, position and title
self.geometry("280x380+1000+300")
self.title('Calculadora')
        # Lists
self.buttonList = []
self.calculatorMemory = []
self.simbolos = ["+", "-", "x", "÷"]
        # Fonts
self.defaultFont = ("Bahnschrift Light", "15", "bold")
self.btnFont = ("Bahnschrift Light", "15")
        # Creating the widget and the application title
self.titleContainer = Frame(master)
self.titleContainer["pady"] = 5
self.titleContainer["bg"] = self.corFundo
self.titleContainer.pack()
self.lbl_Title = Label(self.titleContainer, text=" Calculadora", font=self.defaultFont, bg=self.corFundo, fg= self.corFonte)
self.lbl_Title.pack()
        # Data input box
self.containerInput = Frame(master)
self.containerInput.pack(pady= 5)
self.actualValue = ""
self.inputBox = Entry(self.containerInput,
font=("Bahnschrift Light", "15"),
state=DISABLED,
disabledbackground= self.corFundoInput,
disabledforeground= self.corFonte,
justify='right',
width=18)
self.inputBox.pack()
        # Setting the default button size
self.btnSize = (1, 3)
self.btnHeight, self.btnWidth = self.btnSize
        # Creating the widget and buttons
self.containerBtn = Frame(master)
self.containerBtn["bg"] = self.corFundo
self.containerBtn.pack()
self.createButtons()
    # ------------------ Adding the buttons conveniently from the list in buttonDict.py
def createButtons(self):
for i in range(len(buttonDict)):
indexDict = 1 + i
self.buttonList.append(Button(
self.containerBtn,
text=buttonDict[indexDict]['char'],
font=self.btnFont,
bg= self.corFundoBotao,
fg= self.corFonte,
activebackground= self.corBotaoContraste,
width=self.btnWidth,
height=self.btnHeight))
actualName = self.buttonList[i]['text']
try:
int(buttonDict[indexDict]['char'])
value = self.buttonList[i]['text']
self.buttonList[i].configure(command=lambda actualValue=value: self.insertValue(actualValue))
except ValueError:
pass
if actualName == "=":
self.buttonList[i].configure(command=self.calculate, bg= self.corBotaoEqual, activebackground= self.corBotaoEqualC)
elif actualName == "C":
self.buttonList[i].configure(command=self.clearInput)
elif actualName == "CC":
self.buttonList[i].configure(command=self.clearBoth)
elif actualName == ".":
self.buttonList[i].configure(command=lambda actualValue=".": self.insertValue(actualValue))
elif actualName in self.simbolos:
self.buttonList[i].configure(command=lambda simbolo=actualName: self.addOperator(simbolo),
bg= self.corBotaoSimbolo, activebackground= self.corBotaoSimboloC)
elif actualName == "🖤":
self.buttonList[i].configure(command=self.alterTheme)
self.buttonList[i].grid(
row=buttonDict[indexDict]['row'],
column=buttonDict[indexDict]['column'],
padx=1,
pady=8)
    # --------------------- Wiring up the arithmetic operator buttons
def addOperator(self, symbol):
if len(self.inputBox.get()) > 0:
if self.inputBox.get() in self.simbolos:
self.calculatorMemory.remove(self.calculatorMemory[-1])
self.clearInput()
self.insertValue(symbol)
self.calculatorMemory.append(symbol)
else:
self.calculatorMemory.append(float(self.inputBox.get()))
self.calculatorMemory.append(symbol)
print(self.calculatorMemory)
self.clearInput()
self.insertValue(symbol)
    # ----------------------- Calculator logic
def calculate(self):
try:
self.calculatorMemory.append(float(self.inputBox.get()))
except ValueError:
return
self.clearInput()
print(self.calculatorMemory)
if len(self.calculatorMemory) % 2 == 0:
return
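        # Collapse operators in precedence order: ÷ first, then x, then +, then -.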
while not len(self.calculatorMemory) == 1:
if '÷' in self.calculatorMemory:
self.executeCalc(self.getSymbolIndex("÷"))
elif 'x' in self.calculatorMemory:
self.executeCalc(self.getSymbolIndex("x"))
elif '+' in self.calculatorMemory:
self.executeCalc(self.getSymbolIndex("+"))
elif '-' in self.calculatorMemory:
self.executeCalc(self.getSymbolIndex("-"))
else:
break
if self.calculatorMemory[0].is_integer():
self.insertValue(int(self.calculatorMemory[0]))
else:
            self.insertValue(self.calculatorMemory[0])
print(self.calculatorMemory)
self.clearMemory()
def executeCalc(self, valTuple):
valA, valB, symIndex, symbol = valTuple
if valB == 0 and symbol == "÷":
messagebox.showerror("Erro", "Dividiu valor por 0")
self.clearBoth()
return
result = operations[symbol](valA, valB)
del self.calculatorMemory[symIndex]
self.calculatorMemory.remove(valA)
self.calculatorMemory.remove(valB)
self.calculatorMemory.append(result)
def getSymbolIndex(self, symbol):
symIndex = self.calculatorMemory.index(symbol)
valA = self.calculatorMemory[symIndex - 1]
valB = self.calculatorMemory[symIndex + 1]
valTuple = (valA, valB, symIndex, symbol)
return valTuple
    # --------------------- Input and memory functions -----------------------------
def insertValue(self, value):
if any(simbolo in self.inputBox.get() for simbolo in self.simbolos):
self.clearInput()
if self.inputBox.get() == "0":
self.clearInput()
if value == ".":
inputValue = self.inputBox.get()
if len(self.inputBox.get()) == 0:
return
elif inputValue.find(".") != -1:
return
self.inputBox['state'] = NORMAL
self.inputBox.insert(END, value)
self.inputBox['state'] = DISABLED
def clearInput(self):
self.inputBox['state'] = NORMAL
self.inputBox.delete(0, END)
self.inputBox['state'] = DISABLED
def clearMemory(self):
self.calculatorMemory = []
def clearBoth(self):
self.clearInput()
self.clearMemory()
def changeWidgetColor(self):
self.corFundo = themes[self.actualTheme]["corBackground"]
self.corFonte = themes[self.actualTheme]["corFonte"]
self.corFundoInput = themes[self.actualTheme]["corInput"]
self.corFundoBotao = themes[self.actualTheme]["corInput"]
self.corBotaoContraste = themes[self.actualTheme]["corContraste"]
self.corBotaoSimbolo = themes[self.actualTheme]["corBotaoSimbolo"]
self.corBotaoSimboloC = themes[self.actualTheme]["corBotaoSimboloC"]
self.corBotaoEqual = themes[self.actualTheme]["corBotaoEqual"]
self.corBotaoEqualC = themes[self.actualTheme]["corBotaoEqualC"]
self.configure(bg=self.corFundo)
def setTheme(self):
with open('calculadora/theme-select.txt', 'r+') as f:
theme = f.readline()
if len(theme) == 0 or not theme in themes:
f.write('blackTheme')
self.actualTheme = 'blackTheme'
else:
self.actualTheme = theme
def changeTheme(self, theme):
if not theme == self.actualTheme:
with open('calculadora/theme-select.txt', 'w') as f:
f.write(theme)
self.setTheme()
self.restart_program()
def alterTheme(self):
if self.actualTheme == 'blackTheme':
self.changeTheme('whiteTheme')
else:
self.changeTheme('blackTheme')
def restart_program(self):
python = sys.executable
os.execl(python, python, * sys.argv)
if __name__ == "__main__":
app = Application()
app.lift()
app.mainloop()
|
the-stack_106_30156 | # coding: utf-8
from functools import partial
import sublime
from sublime_plugin import WindowCommand
from .util import StatusSpinner, noop
from .cmd import GitCmd
from .helpers import GitTagHelper, GitErrorHelper
TAG_FORCE = u'The tag %s already exists. Do you want to overwrite it?'
class GitAddTagCommand(WindowCommand, GitTagHelper, GitErrorHelper, GitCmd):
"""
    Prompt for a tag name and an optional message, then create the tag at the
    current commit, optionally signing it or overwriting an existing tag.
"""
def run(self, sign=False):
repo = self.get_repo()
if not repo:
return
tags = self.get_tags(repo, annotate=False)
def on_done(name):
name = name.strip()
if not name:
return
if name in tags:
if sublime.ok_cancel_dialog(TAG_FORCE % name, 'Overwrite'):
self.on_name(repo, sign, name, force=True)
else:
self.on_name(repo, sign, name)
self.window.show_input_panel('Tag:', '', on_done, noop, noop)
def on_name(self, repo, sign, name, force=False):
def on_done(message):
message = message.strip()
if not message:
if sign:
if sublime.ok_cancel_dialog('A signed tag requires a message.', 'Enter message'):
self.on_name(repo, sign, name, force)
return
else:
message = None
self.on_message(repo, sign, name, force, message)
self.window.show_input_panel('Message:', '', on_done, noop, noop)
def on_message(self, repo, sign, name, force, message=None):
kind = None
if sign:
kind = '--sign'
elif message:
kind = '--annotate'
# build command
cmd = ['tag', kind, '--force' if force else None]
if message:
cmd += ['-F', '-']
cmd += [name]
# run command
exit, stdout, stderr = self.git(cmd, cwd=repo, stdin=message)
if exit == 0:
if stdout:
panel = self.window.get_output_panel('git-tag')
panel.run_command('git_panel_write', {'content': stdout})
self.window.run_command('show_panel', {'panel': 'output.git-tag'})
else:
sublime.status_message("Added tag %s" % name)
else:
sublime.error_message(self.format_error_message(stderr))
class GitTagCommand(WindowCommand, GitTagHelper, GitErrorHelper, GitCmd):
"""
    List existing tags in a quick panel and offer per-tag actions
    (show, checkout, verify, delete), plus adding a new tag.
"""
ADD_TAG = '+ Add tag'
SHOW = 'Show'
CHECKOUT = 'Checkout'
VERIFY = 'Verify'
DELETE = 'Delete'
TAG_ACTIONS = [
[SHOW, 'git show {tag}'],
[CHECKOUT, 'git checkout tags/{tag}'],
[VERIFY, 'git tag --verify {tag}'],
[DELETE, 'git tag --delete {tag}'],
]
ACTION_CALLBACKS = {
SHOW: 'show_tag',
CHECKOUT: 'checkout_tag',
VERIFY: 'verify_tag',
DELETE: 'delete_tag',
}
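    # Quick-panel actions dispatch by name: on_tag() looks up the callback
    # string here and resolves it with getattr(self, callback).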
def run(self, repo=None):
repo = repo or self.get_repo()
if not repo:
return
tags = self.get_tags(repo)
choices = self.format_quick_tags(tags)
choices.append([self.ADD_TAG, 'Add a tag referencing the current commit.'])
def on_done(idx):
if idx != -1:
tag = choices[idx][0]
if tag == self.ADD_TAG:
self.window.run_command('git_add_tag')
else:
sublime.set_timeout(partial(self.on_tag, repo, tag), 50)
self.window.show_quick_panel(choices, on_done)
def on_tag(self, repo, tag):
choices = [[a, t.format(tag=tag)] for a, t in self.TAG_ACTIONS]
def on_done(idx):
if idx != -1:
action = self.TAG_ACTIONS[idx][0]
callback = self.ACTION_CALLBACKS.get(action)
func = getattr(self, callback, None)
if func:
func(repo, tag)
self.window.show_quick_panel(choices, on_done)
def reset(self, repo):
def on_time():
self.window.run_command('git_tag', {'repo': repo})
sublime.set_timeout(on_time, 50)
# callbacks
def verify_tag(self, repo, tag):
self.panel = self.window.get_output_panel('git-tag')
self.panel_shown = False
thread = self.git_async(['tag', '--verify', tag], cwd=repo, on_data=self.on_data)
runner = StatusSpinner(thread, "Verifying %s" % tag)
runner.start()
self.reset(repo)
def show_tag(self, repo, tag):
self.window.run_command('git_show', {'repo': repo, 'obj': 'tags/%s' % tag})
def delete_tag(self, repo, tag):
exit, stdout, stderr = self.git(['tag', '--delete', tag], cwd=repo)
if exit == 0:
self.reset(repo)
else:
sublime.error_message(stderr)
def checkout_tag(self, repo, tag):
self.window.run_command('git_checkout_tag', {'repo': repo, 'tag': tag})
# async helpers
def on_data(self, d):
if not self.panel_shown:
self.window.run_command('show_panel', {'panel': 'output.git-tag'})
self.panel.run_command('git_panel_append', {'content': d, 'scroll': True})
|
the-stack_106_30157 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2020 fetchai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains default's message definition."""
import logging
from typing import Dict, Set, Tuple, cast
from aea.configurations.base import PublicId
from aea.exceptions import AEAEnforceError, enforce
from aea.protocols.base import Message
from packages.fetchai.protocols.default.custom_types import ErrorCode as CustomErrorCode
_default_logger = logging.getLogger("aea.packages.fetchai.protocols.default.message")
DEFAULT_BODY_SIZE = 4
class DefaultMessage(Message):
"""A protocol for exchanging any bytes message."""
protocol_id = PublicId.from_str("fetchai/default:0.9.0")
ErrorCode = CustomErrorCode
class Performative(Message.Performative):
"""Performatives for the default protocol."""
BYTES = "bytes"
ERROR = "error"
def __str__(self):
"""Get the string representation."""
return str(self.value)
_performatives = {"bytes", "error"}
class _SlotsCls:
__slots__ = (
"content",
"dialogue_reference",
"error_code",
"error_data",
"error_msg",
"message_id",
"performative",
"target",
)
def __init__(
self,
performative: Performative,
dialogue_reference: Tuple[str, str] = ("", ""),
message_id: int = 1,
target: int = 0,
**kwargs,
):
"""
Initialise an instance of DefaultMessage.
:param message_id: the message id.
:param dialogue_reference: the dialogue reference.
:param target: the message target.
:param performative: the message performative.
"""
super().__init__(
dialogue_reference=dialogue_reference,
message_id=message_id,
target=target,
performative=DefaultMessage.Performative(performative),
**kwargs,
)
@property
def valid_performatives(self) -> Set[str]:
"""Get valid performatives."""
return self._performatives
@property
def dialogue_reference(self) -> Tuple[str, str]:
"""Get the dialogue_reference of the message."""
enforce(self.is_set("dialogue_reference"), "dialogue_reference is not set.")
return cast(Tuple[str, str], self.get("dialogue_reference"))
@property
def message_id(self) -> int:
"""Get the message_id of the message."""
enforce(self.is_set("message_id"), "message_id is not set.")
return cast(int, self.get("message_id"))
@property
def performative(self) -> Performative: # type: ignore # noqa: F821
"""Get the performative of the message."""
enforce(self.is_set("performative"), "performative is not set.")
return cast(DefaultMessage.Performative, self.get("performative"))
@property
def target(self) -> int:
"""Get the target of the message."""
enforce(self.is_set("target"), "target is not set.")
return cast(int, self.get("target"))
@property
def content(self) -> bytes:
"""Get the 'content' content from the message."""
enforce(self.is_set("content"), "'content' content is not set.")
return cast(bytes, self.get("content"))
@property
def error_code(self) -> CustomErrorCode:
"""Get the 'error_code' content from the message."""
enforce(self.is_set("error_code"), "'error_code' content is not set.")
return cast(CustomErrorCode, self.get("error_code"))
@property
def error_data(self) -> Dict[str, bytes]:
"""Get the 'error_data' content from the message."""
enforce(self.is_set("error_data"), "'error_data' content is not set.")
return cast(Dict[str, bytes], self.get("error_data"))
@property
def error_msg(self) -> str:
"""Get the 'error_msg' content from the message."""
enforce(self.is_set("error_msg"), "'error_msg' content is not set.")
return cast(str, self.get("error_msg"))
def _is_consistent(self) -> bool:
"""Check that the message follows the default protocol."""
try:
enforce(
type(self.dialogue_reference) == tuple,
"Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.".format(
type(self.dialogue_reference)
),
)
enforce(
type(self.dialogue_reference[0]) == str,
"Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.".format(
type(self.dialogue_reference[0])
),
)
enforce(
type(self.dialogue_reference[1]) == str,
"Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.".format(
type(self.dialogue_reference[1])
),
)
enforce(
type(self.message_id) == int,
"Invalid type for 'message_id'. Expected 'int'. Found '{}'.".format(
type(self.message_id)
),
)
enforce(
type(self.target) == int,
"Invalid type for 'target'. Expected 'int'. Found '{}'.".format(
type(self.target)
),
)
# Light Protocol Rule 2
# Check correct performative
enforce(
type(self.performative) == DefaultMessage.Performative,
"Invalid 'performative'. Expected either of '{}'. Found '{}'.".format(
self.valid_performatives, self.performative
),
)
# Check correct contents
actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE
expected_nb_of_contents = 0
if self.performative == DefaultMessage.Performative.BYTES:
expected_nb_of_contents = 1
enforce(
type(self.content) == bytes,
"Invalid type for content 'content'. Expected 'bytes'. Found '{}'.".format(
type(self.content)
),
)
elif self.performative == DefaultMessage.Performative.ERROR:
expected_nb_of_contents = 3
enforce(
type(self.error_code) == CustomErrorCode,
"Invalid type for content 'error_code'. Expected 'ErrorCode'. Found '{}'.".format(
type(self.error_code)
),
)
enforce(
type(self.error_msg) == str,
"Invalid type for content 'error_msg'. Expected 'str'. Found '{}'.".format(
type(self.error_msg)
),
)
enforce(
type(self.error_data) == dict,
"Invalid type for content 'error_data'. Expected 'dict'. Found '{}'.".format(
type(self.error_data)
),
)
for key_of_error_data, value_of_error_data in self.error_data.items():
enforce(
type(key_of_error_data) == str,
"Invalid type for dictionary keys in content 'error_data'. Expected 'str'. Found '{}'.".format(
type(key_of_error_data)
),
)
enforce(
type(value_of_error_data) == bytes,
"Invalid type for dictionary values in content 'error_data'. Expected 'bytes'. Found '{}'.".format(
type(value_of_error_data)
),
)
# Check correct content count
enforce(
expected_nb_of_contents == actual_nb_of_contents,
"Incorrect number of contents. Expected {}. Found {}".format(
expected_nb_of_contents, actual_nb_of_contents
),
)
# Light Protocol Rule 3
if self.message_id == 1:
enforce(
self.target == 0,
"Invalid 'target'. Expected 0 (because 'message_id' is 1). Found {}.".format(
self.target
),
)
else:
enforce(
0 < self.target < self.message_id,
"Invalid 'target'. Expected an integer between 1 and {} inclusive. Found {}.".format(
self.message_id - 1, self.target,
),
)
except (AEAEnforceError, ValueError, KeyError) as e:
_default_logger.error(str(e))
return False
return True
|
the-stack_106_30158 | from rest_framework import serializers
from .models import Author, FriendRequest, Friends,Post,Comment,VisibleToPost,Categories, Following,Image
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.utils.dateparse import parse_datetime
from datetime import datetime
from django.db.models import Q
from django.contrib.auth.models import Permission
from rest_framework.relations import HyperlinkedIdentityField
from .pagination import CustomPagination,CommentPagination
class UserSerializer(serializers.ModelSerializer):
class Meta:
model=User
fields=('id','username','email')
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model=User
fields=('id','username','email','password')
extra_kwargs={'password':{'write_only':True}}
def create(self,validated_data):
user=User.objects.create_user(validated_data['username'],validated_data['email'],validated_data['password'])
author=Author.objects.create(username=validated_data['username'], password=validated_data['password'],owner=user,hostName="https://project-cmput404.herokuapp.com")
auth=Author.objects.filter(owner=user)
url=auth[0].get_url(auth[0])
auth.update(url=url)
return user
class LoginSerializer(serializers.Serializer):
username=serializers.CharField()
password=serializers.CharField()
def validate(self,data):
user=authenticate(**data)
print(user)
if user and user.is_active:
return user
elif user and not user.is_active:
raise serializers.ValidationError("Please Wait till we authorize you")
else:
raise serializers.ValidationError("Incorrect Crendentials")
class AuthorSerializer(serializers.ModelSerializer):
firstName=serializers.CharField(required=False)
lastName=serializers.CharField(required=False)
username=serializers.CharField(required=False)
hostName=serializers.URLField(read_only=True)
githubUrl=serializers.URLField(required=False)
class Meta:
model = Author
fields=['url','author_id','firstName','lastName','username','hostName','githubUrl']
def update(self, instance, validated_data):
instance.firstName = validated_data.get('firstName', instance.firstName)
instance.lastName = validated_data.get('lastName', instance.lastName)
        instance.username = validated_data.get('username', instance.username)
instance.githubUrl = validated_data.get('githubUrl', instance.githubUrl)
instance.save()
return instance
class FriendRequestSerializer(serializers.ModelSerializer):
# pk = serializers.PrimaryKeyRelatedField(queryset=FriendRequest.objects.all())
created = serializers.DateTimeField(default=timezone.now())
accepted = serializers.BooleanField(default=False)
regected = serializers.BooleanField(default=False)
class Meta:
model=FriendRequest
fields=('pk','from_author',
'to_author',
'created','accepted',
'regected')
def create(self, validated_data):
new_instance = FriendRequest.objects.create(\
from_author=validated_data.get('from_author'),\
to_author=validated_data.get('to_author'),\
created=timezone.now(),\
accepted=False,\
regected=False
)
new_instance.save()
return new_instance
def update(self, instance, validated_data):
instance.accepted = validated_data.get("accepted")
instance.regected = validated_data.get("regected")
instance.save()
return instance
class FriendsSerializer(serializers.ModelSerializer):
date = serializers.DateTimeField(default=datetime.now())
author1=AuthorSerializer(required=False)
author2=AuthorSerializer(required=False)
class Meta:
model=Friends
fields=('pk','author1',
'author2',
'date')
def create(self, validated_data):
# print(111, validated_data, 222)
new_instance = Friends.objects.create(\
author1=validated_data.get('to_author'),\
author2=validated_data.get('from_author'),\
date=timezone.now()
)
new_instance.save()
return new_instance
class VisibleToPostSerializer(serializers.ModelSerializer):
class Meta:
model=VisibleToPost
fields=['author_url']
class CommentSerializer(serializers.ModelSerializer):
    published = serializers.DateTimeField(default=datetime.now)
pagination_class = CommentPagination
class Meta:
model= Comment
fields=['pk','comment','author','postid','published','contentType']
def create(instance,validated_data,author,post):
new_instance = Comment.objects.create(\
comment=validated_data.get('comment'),\
author=author,\
published=timezone.now(),\
postid=post,\
contentType= validated_data.get('contentType')
)
return new_instance
class CategoriesSerializer(serializers.ModelSerializer):
class Meta:
model=Categories
fields=['category']
class ImageSerializer(serializers.ModelSerializer):
img=serializers.CharField(required=False)
class Meta:
model=Image
fields=['img']
class FollowingSerializer(serializers.ModelSerializer):
class Meta:
model = Following
fields = ('follower', 'following', 'created')
def create(self, validated_data):
new_instance = Following.objects.create(\
follower=validated_data.get("requester_id"),\
following=validated_data.get("requestee_id"),\
created=timezone.now()\
)
new_instance.save()
return new_instance
class PostSerializer(serializers.ModelSerializer):
    publicationDate = serializers.DateTimeField(default=datetime.now)
categories=CategoriesSerializer(many=True,source="post_categories",required=False)
comments= CommentSerializer(many=True,source='post_comment',required=False)
visibleTo=VisibleToPostSerializer(many=True,source="visible_post",required=False)
author=AuthorSerializer(required=False)
content=serializers.CharField(required=False)
title=serializers.CharField(required=False,max_length=50)
images=ImageSerializer(many=True,source="post_image",required=False)
class Meta:
model = Post
fields = ['postid' ,'publicationDate','title','source' ,'origin','contentType','author','content','permission','comments','categories','unlisted','visibleTo','images']
def create(self, validated_data,author,request):
new_instance = Post.objects.create(content=validated_data.get('content'),title=validated_data.get('title'), permission=validated_data.get('permission'),author=author,publicationDate=datetime.now(),contentType=validated_data.get('contentType'),origin="https://"+request.get_host())
if validated_data.get('categories'):
for category in validated_data.get('categories'):
                categories=Categories.objects.create(post=new_instance,category=category)
if validated_data.get('images'):
for image in validated_data.get('images'):
Image.objects.create(post_id=new_instance,img=image['base64'])
if validated_data.get('permission')=='M':
#TODO permitted authors find a way to do this
permitted_authors=validated_data.get('authors')
visible=VisibleToPost.objects.create(post=new_instance,author=author)
if validated_data.get('permission') == 'F':
friends=Friends.objects.filter(Q(author1=author)| Q(author2=author))
VisibleToPost.objects.create(post=new_instance,author=author,author_url=author.url)
for friend in friends:
if friend.author1 == author:
new_visible=VisibleToPost.objects.create(post=new_instance,author=friend.author2,author_url=friend.author2.url)
elif friend.author2 == author:
new_visible=VisibleToPost.objects.create(post=new_instance,author=friend.author1,author_url=friend.author1.url)
elif validated_data.get('permission') == 'FH':
friends=Friends.objects.filter(Q(author1=author)| Q(author2=author))
print(friends)
for friend in friends:
print(friend.author2.hostName,request.get_host())
if friend.author1 == author and friend.author2.hostName.replace("https://","") == request.get_host() and friend.author1.hostName.replace("https://","") == request.get_host():
print("here")
new_visible=VisibleToPost.objects.create(post=new_instance,author=friend.author2,author_url=friend.author2.url)
elif friend.author2 == author and friend.author1.hostName.replace("https://","") == request.get_host() and friend.author2.hostName.replace("https://","")==request.get_host():
print("overhere")
new_visible=VisibleToPost.objects.create(post=new_instance,author=friend.author1,author_url=friend.author1.url)
return new_instance
def update(self, instance, validated_data):
instance.title = validated_data.get('title', instance.title)
instance.content = validated_data.get('content', instance.content)
instance.permission = validated_data.get('permission', instance.permission)
instance.contentType = validated_data.get('contentType', instance.contentType)
instance.save()
return instance
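# --- Usage sketch (added; the view class and the Author lookup below are
# illustrative assumptions, only the custom create() signature comes from the
# PostSerializer above) ---
#
#   from rest_framework.views import APIView
#   from rest_framework.response import Response
#
#   class PostCreateView(APIView):
#       def post(self, request):
#           serializer = PostSerializer(data=request.data)
#           serializer.is_valid(raise_exception=True)
#           author = Author.objects.get(owner=request.user)   # assumed lookup
#           post = serializer.create(serializer.validated_data, author, request)
#           return Response(PostSerializer(post).data, status=201)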
|
the-stack_106_30159 | import csv
import emoji
# https://www.mindk.com/blog/how-to-develop-a-chat-bot/
API_KEY = 'telegram token from the BotFather bot'
time_zone = 'Asia/Tel_Aviv'
start_message = emoji.emojize('Hello and welcome to Israel\'s rent apartment bot \U0001F1EE\U0001F1F1\U0001F1EE\U0001F1F1\U0001F1EE\U0001F1F1 \n' \
                              'In this bot you will answer several questions related to your ' \
                              'dream apartment \U0001F4B0 :shower: :evergreen_tree: \n'
                              'Then you will get links to rent apartments that match your\n'
                              'profile and your preferences :sunrise: :house_with_garden: :sunrise: \n' \
                              'Write /help to see the commands that are available in this bot \U00002694')
help_message = '1) /city - insert the city you are interested in living in \U0001F46A\n' \
               'If you have any issues, contact me via the button below \U0001F680'
city_message = 'Choose an area you are interested in living in \U0001F3E0'
room_message = 'Please choose number of rooms \U0001F3E8'
# https://worldpopulationreview.com/countries/cities/israel
num_col = 3
population = 128500
yad2_url_start = 'https://www.yad2.co.il/realestate/rent?'
with open('city_pop.csv') as csvfile:
csv_reader = csv.reader(csvfile)
city_list = [[row[0], yad2_url_start + row[2]] for row in csv_reader if int(row[1]) > population]
city_list[0][0] = 'Jerusalem'
city_list = city_list[: int(len(city_list)/num_col)*num_col]
# city_list = ['Haifa', 'Tel-Aviv', 'Jerusalem', 'Ramat-Gan']
# print(start_message)
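# --- Keyboard-layout sketch (added; InlineKeyboardButton is an assumption
# about the bot framework in use, e.g. python-telegram-bot). city_list was
# trimmed above to a multiple of num_col entries, so it can be chunked into
# rows of num_col buttons:
#
#   rows = [city_list[i:i + num_col] for i in range(0, len(city_list), num_col)]
#   keyboard = [[InlineKeyboardButton(name, url=url) for name, url in row]
#               for row in rows]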
|
the-stack_106_30160 | #!/usr/bin/env python
''' A tool for analyzing the tools options for conflicts.
This tool analyzes the c files in the directory given as the first argument for
conflicting options within tpm command groups. The groups themselves are
organized by the standard document:
https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf
The tool outputs the group, the tools in the group, and the conflicting options. From there
a human will need to make a plan of action to correct the tools so they conform.
The tool exits with a non-zero status on error, which makes it useful for checking
PRs; it can thus be added to travis.
'''
from __future__ import print_function
import glob
import os
import re
import sys
from collections import Counter
class Tool(object):
'''Represents a tool name and it's options'''
def __init__(self, name, options):
self._name = name
self._options = options
@property
def options(self):
'''Returns the tools options'''
return self._options
@property
def name(self):
'''Returns the tools name'''
return self._name
def __str__(self, *args, **kwargs):
return "%s: %s" % (self._name, str(self._options))
class ToolConflictor(object):
'''Finds option conflicts in tools '''
_ignore = ["tpm2_tool"]
def __init__(self, tools, full=False):
self.full = full
# Using the command summary here:
# https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf
# We organize the tools into their grouping
#
# Our names in the tools might not be exactly the same, so fix them up in the map
self._tools_by_group = [
{
"gname": "start-up",
"tools-in-group": ["tpm2_startup"],
"tools": [],
"conflict": None,
"ignore": set(),
},
{
"gname": "session",
"tools-in-group":
["tpm2_startauthsession", "tpm2_policyrestart"],
"tools": [],
"conflict": None,
"ignore": set()
},
{
"gname": "object",
"tools-in-group": [
"tpm2_create", "tpm2_createprimary", "tpm2_changeauth",
"tpm2_load", "tpm2_loadexternal", "tpm2_readpublic",
"tpm2_activatecredential", "tpm2_makecredential",
"tpm2_unseal"
],
"tools": [],
"conflict": None,
"ignore": set(['s', 'secret', 'f', 'format'])
},
{
"gname": "duplication",
"tools-in-group": ["tpm2_import", "tpm2_duplicate"],
"tools": [],
"conflict": None,
"ignore": set()
},
{
"gname": "asymmetric",
"tools-in-group": ["tpm2_rsaencrypt", "tpm2_rsadecrypt"],
"tools": [],
"conflict": None,
"ignore": set()
},
{
"gname": "symmetric",
"tools-in-group":
["tpm2_encryptdecrypt", "tpm2_hmac", "tpm2_hash"],
"tools": [],
"conflict": None,
"ignore": set(['a', 'hierarchy', 'D', 'decrypt', 't', 'ticket', 'i', 'iv', 'mode', 'halg', 'g', 'G'])
},
{
"gname": "random",
"tools-in-group": ["tpm2_getrandom", "tpm2_stirrandom"],
"tools": [],
"conflict": None,
"ignore": set()
},
{
"gname": "attestation",
"tools-in-group": ["tpm2_certify", "tpm2_quote"],
"tools": [],
"conflict": None,
"ignore": set(['g', 'halg', 'm', 'message', 'signature', 'pcrs'])
},
{
"gname": "signing",
"tools-in-group": ["tpm2_verifysignature", "tpm2_sign"],
"tools": [],
"conflict": None,
"ignore": set(['s', 'sig'])
},
{
"gname": "integrity",
"tools-in-group":
["tpm2_pcrextend", "tpm2_pcrevent", "tpm2_pcrlist", "tpm2_pcrreset", "tpm2_checkquote", "tpm2_pcrallocate"],
"tools": [],
"conflict": None,
"ignore": set(['g', 'halg', 'f', 'format', 's', 'algs'])
},
{
"gname": "ea",
"tools-in-group": ["tpm2_policypcr", "tpm2_createpolicy", "tpm2_policyauthorize", "tpm2_policyor", "tpm2_policypassword",
"tpm2_policycommandcode", "tpm2_policysecret", "tpm2_policylocality", "tpm2_policyduplicationselect"],
"tools": [],
"conflict": None,
"ignore": set(['q', 'qualifier', 'n', 'name', 't', 'ticket', 'policy-list', 'c', 'context', 'N', 'new-parent-name', 'i', 'is-include-object'])
},
{
"gname": "hierarchy",
"tools-in-group": ["tpm2_clear", "tpm2_clearcontrol", "tpm2_hierarchycontrol"],
"tools": [],
"conflict": None,
"ignore": set(['c', 'clear'])
},
{
"gname": "context",
"tools-in-group": ["tpm2_flushcontext", "tpm2_evictcontrol"],
"tools": [],
"conflict": None,
"ignore": set(['S', 'session', 'p', 'persistent', 'a', 'hierarchy'])
},
{
"gname": "nv",
"tools-in-group": [
"tpm2_nvreadlock", "tpm2_nvrelease", "tpm2_nvdefine",
"tpm2_nvread", "tpm2_nvwrite", "tpm2_nvlist", "tpm2_nvincrement"
],
"tools": [],
"conflict": None,
"ignore": set(['S', 'session', 't', 'attributes'])
},
{
"gname": "capability",
"tools-in-group": ["tpm2_getcap", "tpm2_testparms"],
"tools": [],
"conflict": None,
"ignore": set(['capability', 'c', 'list', 'l'])
},
{
"gname": "dictionary",
"tools-in-group": ["tpm2_dictionarylockout"],
"tools": [],
"conflict": None,
"ignore": set()
},
{
"gname": "custom",
"tools-in-group": [
"tpm2_send", "tpm2_createak", "tpm2_createek",
"tpm2_getmanufec"
],
"tools": [],
"conflict": None,
"ignore": set()
},
{
"gname": "testing",
"tools-in-group": ["tpm2_selftest", "tpm2_incrementalselftest", "tpm2_gettestresult"],
"tools": [],
"conflict": None,
"ignore": set()
}
]
for tool in tools:
if tool.name in ToolConflictor._ignore:
continue
found = False
for tool_group in self._tools_by_group:
if tool.name in tool_group['tools-in-group']:
tool_group["tools"].append(tool)
found = True
break
if not found:
sys.exit("Group not found for tool : %s" % tool.name)
def process(self):
'''Processes the tool groups and generates the conflict data'''
#
        # Now that we have the tools mapped onto a group, let's figure out conflicts within the
# group, and record them in the conflict field.
#
for tool_group in self._tools_by_group:
            # If there is only one tool, it can't conflict
if len(tool_group['tools']) == 1:
continue
# Identify options that are only used by a single tool within the group
option_list = [
opt
for tool in tool_group['tools']
for shortopt_longopt in tool.options.items()
for opt in shortopt_longopt
]
conflicts = set([opt for (opt, count) in Counter(option_list).items() if count==1])
conflicts -= tool_group['ignore']
if len(conflicts) > 0:
tool_group['conflict'] = conflicts
def report(self):
'''Prints a conflict report to stdout
It returns True if conflicts were detected or false otherwise
'''
has_conflicts = False
for tool_group in self._tools_by_group:
gname = tool_group["gname"]
conflicts = tool_group["conflict"]
tools = tool_group["tools"]
if conflicts is None:
continue
if not self.full and gname == "custom":
continue
has_conflicts = True
print("group: %s:" % (gname))
print("\ttools: %s" % str([t.name for t in tools]))
print("\tconflicts: %s" % (str(conflicts)))
return has_conflicts
# pylint: disable=locally-disabled, too-few-public-methods
class Parser(object):
'''Parses C files for long option style option blocks'''
regx = re.compile(
r'{\s*"([^"]+)"\s*,\s*(?:required_argument|no_argument)\s*,\s*\w+\s*,\s*\'(\w)\'\s*}')
def __init__(self, path=os.getcwd()):
self._path = path
@staticmethod
def _extract_options(source_file):
with open(source_file) as open_file:
contents = open_file.read()
# This returns a list of tuples, where each tuple
# is group 1 ... N of the matched options
# We want to build a dict() of short to long option
# and thus need to swap the positions in the tuple,
# as match order is long option then short option
match_obj = Parser.regx.findall(contents)
if match_obj is None:
return {}
# Reverse the tuples in the list and make a dictionary
# of short option to long option.
return dict([t[::-1] for t in match_obj])
def parse(self):
'''Parses the directory and aggregates tool option data'''
tools = []
path = os.path.join(self._path, "*.c")
for c_file in glob.glob(path):
name = (os.path.split(c_file)[-1])[:-2]
opts = Parser._extract_options(c_file)
tools.append(Tool(name, opts))
return tools
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("path", help="path to directory of c files to analyze")
parser.add_argument("-c", "--custom",
help="include custom tools in the report",
action="store_true")
args = parser.parse_args()
parser = Parser(args.path)
tools = parser.parse()
conflictor = ToolConflictor(tools, full=args.custom)
conflictor.process()
has_conflicts = conflictor.report()
# If it had conflicts, exit non-zero
sys.exit(has_conflicts)
if __name__ == "__main__":
main()
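# --- Invocation sketch (added; the script file name used here is an
# assumption, the arguments come from the argparse setup above) ---
#
#   python check_tool_options.py path/to/tpm2-tools/tools            # report conflicts
#   python check_tool_options.py --custom path/to/tpm2-tools/tools   # include the "custom" group
#
# The process exits non-zero when conflicts are found, so it can gate a CI job.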
|
the-stack_106_30161 | from typing import Dict, Optional, Tuple
import tokenizers
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from torch.nn.modules.transformer import _get_activation_fn
from whitespace_repair.model import tokenizer as toklib
from whitespace_repair.model.embedding import Embedding
from whitespace_repair.utils import common, constants, io, mask as mask_utils
from whitespace_repair.utils.config import EncoderDecoderConfig
logger = common.get_logger("ENCODER")
class BaseEncoder(nn.Module):
def __init__(self,
config: EncoderDecoderConfig,
device: torch.device):
super().__init__()
self.config = config
self.device = device
# set these attributes in child class
self.tokenizer: tokenizers.Tokenizer
self.padding_token_id: int
self.embedding: Embedding
self.encoder: nn.Module
def forward(self, src: torch.Tensor, src_mask: Optional[torch.Tensor] = None) \
-> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
raise NotImplementedError()
@property
def encoder_model_dim(self) -> int:
return self.config.model_dim
# exact copy of pytorch native transformer encoder layer, just with need_weights set to true
class TransformerEncoderLayer(nn.Module):
__constants__ = ['batch_first', 'norm_first']
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,
layer_norm_eps=1e-5, batch_first=False, norm_first=False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(
d_model, nhead, dropout=dropout, batch_first=batch_first,
**factory_kwargs
)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
# Legacy string support for activation function.
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x: Tensor,
attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.self_attn(x, x, x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=True)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
class PytorchEncoder(BaseEncoder):
def __init__(self,
config: EncoderDecoderConfig,
device: torch.device,
custom_encoder_layer: nn.Module = None,
as_decoder: bool = False):
super().__init__(config=config, device=device)
self.as_decoder = as_decoder
self.tokenizer = toklib.load_tokenizer(self.config.tokenizer)
self.padding_token_id = self.tokenizer.token_to_id(constants.PAD)
self.embedding = Embedding(num_embeddings=self.tokenizer.get_vocab_size(),
embedding_dim=self.config.embedding_dim,
model_dim=self.config.model_dim,
pad_token_id=self.tokenizer.token_to_id(constants.PAD),
learned_positional_embeddings=self.config.learned_positional_embeddings,
max_num_embeddings=self.config.max_num_embeddings,
norm_embeddings=self.config.norm_embeddings,
dropout=self.config.dropout)
if custom_encoder_layer is not None:
encoder_layer = custom_encoder_layer
else:
encoder_layer = TransformerEncoderLayer(d_model=self.config.model_dim,
nhead=self.config.attention_heads,
dim_feedforward=self.config.feedforward_dim,
dropout=self.config.dropout,
activation=self.config.activation)
self.encoder = nn.TransformerEncoder(encoder_layer=encoder_layer,
num_layers=1 if self.config.share_parameters else self.config.num_layers)
if self.config.pretrained:
checkpoint = io.load_checkpoint(self.config.pretrained)
io.load_state_dict(module=self,
state_dict=checkpoint["model_state_dict"],
prefix="decoder." if as_decoder else "encoder.")
global logger
logger.info(f"Successfully loaded pretrained weights into {self.__class__.__name__} "
f"from {self.config.pretrained}")
if self.config.fixed:
for p in self.parameters():
p.requires_grad = False
self.to(self.device)
def forward(self, src: torch.Tensor, src_mask: Optional[torch.Tensor] = None) \
-> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
assert (src_mask is None or src_mask.dim() == 2), \
f"src_mask has to be of shape [S, S], but got {src_mask.shape}"
if self.as_decoder and src_mask is None:
S, B = src.shape
src_mask = mask_utils.generate_square_subsequent_mask(S, S, device=src.device)
src_key_padding_mask = mask_utils.get_padding_mask(src, self.padding_token_id)
emb = self.embedding(src)
# reuse the same layer multiple time when parameters are shared
if self.config.share_parameters:
enc = emb
for _ in range(self.config.num_layers):
enc = self.encoder(enc, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
else:
enc = self.encoder(emb, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
return enc, {}
def get_encoder_from_config(config: EncoderDecoderConfig, device: torch.device) -> BaseEncoder:
if config.type == "default":
encoder = PytorchEncoder(config=config,
device=device)
else:
raise ValueError(f"Unknown encoder type {config.type}")
return encoder
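# --- Usage sketch (added; any EncoderDecoderConfig fields beyond those
# referenced above are assumptions) ---
#
#   cfg = ...  # an EncoderDecoderConfig with type="default", a tokenizer path,
#              # model_dim, attention_heads, feedforward_dim, dropout, etc.
#   encoder = get_encoder_from_config(cfg, torch.device("cpu"))
#   enc, _ = encoder(src)   # src: LongTensor of token ids, shape [S, B]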
|
the-stack_106_30164 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A package with units for generic executables. Usually, PE, ELF, and MachO formats are covered.
"""
class ParsingFailure(ValueError):
def __init__(self, kind):
super().__init__(F'unable to parse input as {kind} file')
def exeroute(data, handler_elf, handler_macho, handler_pe, *args, **kwargs):
if data[:2] == B'MZ':
from pefile import PE as PEFile
try:
parsed = PEFile(data=data, fast_load=True)
except Exception as E:
raise ParsingFailure('PE') from E
else:
return handler_pe(parsed, *args, **kwargs)
if data[:4] == B'\x7FELF':
from ....lib.structures import MemoryFile
from elftools.elf.elffile import ELFFile
try:
parsed = ELFFile(MemoryFile(data))
except Exception as E:
raise ParsingFailure('ELF') from E
else:
return handler_elf(parsed, *args, **kwargs)
if set(data[:4]) <= {0xFE, 0xED, 0xFA, 0xCE, 0xCF}:
from ....lib.structures import MemoryFile
import macholib
import macholib.mach_o
import macholib.MachO
class InMemoryMachO(macholib.MachO.MachO):
def __init__(self):
self.graphident = None
self.filename = None
self.loader_path = None
self.fat = None
self.headers = []
self.load(MemoryFile(data))
try:
parsed = InMemoryMachO()
assert parsed.headers
except Exception as E:
raise ParsingFailure('MachO') from E
else:
return handler_macho(parsed, *args, **kwargs)
raise ValueError('Unknown executable format')
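# --- Usage sketch (added; the handler names below are hypothetical) ---
# exeroute dispatches on the magic bytes of `data` and hands the parsed object
# (a pefile.PE, an ELFFile, or the in-memory MachO wrapper) plus any extra
# arguments to the matching handler:
#
#   def handle_elf(elf, *args): ...
#   def handle_macho(macho, *args): ...
#   def handle_pe(pe, *args): ...
#
#   result = exeroute(data, handle_elf, handle_macho, handle_pe)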
|
the-stack_106_30165 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import click
import click_log
import logging
import os
import sys
from .mqtt_client import MqttClient
from .config import load_config
from .vbus import DeltaSol_BS_Plus
__version__ = '0.0.0'
@click.command()
@click.version_option(version=__version__)
@click.option('--config', '-c', 'config_file', type=click.File('r'), required=True, help='configuration file (YAML format).')
@click_log.simple_verbosity_option(default='INFO')
def cli(config_file=None):
logging.info('Process started')
config = load_config(config_file)
try:
publish = config.get('publish', None)
logging.debug('publish %s', publish)
solar = DeltaSol_BS_Plus(**config['solar'])
mqtt = MqttClient(**config['mqtt'])
mqtt.loop_start()
def on_change(key, value):
if isinstance(value, float):
value = '%.1f' % value
else:
value = str(value)
logging.info("Change %s %s", key, value)
if publish is None:
return mqtt.publish(key, value, use_json=False)
tmp = publish.get(key, None)
if not tmp:
return
if isinstance(tmp, str):
mqtt.publish(tmp, value, use_json=False)
else:
mqtt.publish(key, value, use_json=False)
solar.on_change = on_change
solar.loop_forever()
except KeyboardInterrupt:
        logging.warning('KeyboardInterrupt')
except Exception as e:
click.echo(str(e), err=True)
if os.environ.get('DEBUG', False):
raise e
sys.exit(1)
logging.info('Process stopped')
def main():
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
cli()
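# --- Configuration sketch (added). The top-level keys 'solar', 'mqtt' and the
# optional 'publish' mapping follow the code above; the nested keys are
# assumptions, since they are passed straight to DeltaSol_BS_Plus / MqttClient:
#
#   solar:
#     port: /dev/ttyUSB0        # assumed constructor argument
#   mqtt:
#     host: localhost           # assumed constructor argument
#   publish:                    # optional: map sensor keys to MQTT topics
#     temperature1: solar/collector/temperature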
|
the-stack_106_30166 | dict_chisla = {} # Представление числа ПРОСТОЕ ЧИСЛО: КОЛИЧЕСТВО
flag_sost_chisla = False # Флаг составного числа
prost_chislo = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89,
97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433,
439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593,
599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827,
829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009]
# Factorization of the number x
def factoriz(x):
    '''
    :param x: Natural number x in [1:1000]
    :return: dict_chisla  The canonical form of the number
    '''
global flag_sost_chisla
global dict_chisla
global prost_chislo
ii = 0
del_tmp = []
while prost_chislo[ii] <= x:
if x % prost_chislo[ii] == 0:
flag_sost_chisla = True
del_tmp.append(prost_chislo[ii])
x = int(x / prost_chislo[ii])
ii = -1
ii += 1
if flag_sost_chisla == False:
dict_chisla[1] = 1
dict_chisla[x] = 1
for i in range (len(del_tmp)):
dict_chisla[del_tmp[i]] = del_tmp.count(del_tmp[i])
return
# Primality check
def prostoe_chislo(x):
    '''
    :param x: Number under test
    :return: flag_sost_chisla False/True - the number is prime/composite
    '''
global prost_chislo
global flag_sost_chisla
flag_sost_chisla = False
ii = 0
i = int(x**0.5)
while prost_chislo[ii] <= i:
if x % prost_chislo[ii] == 0:
flag_sost_chisla = True
print()
            print('The number', x, 'is composite')
break
ii += 1
if flag_sost_chisla == False:
print()
        print('The number', x, 'is prime')
return
# List of the prime divisors of the number
def delitely_chisla(x):
    '''
    :param x: Number under test
    :return: The prime divisors of the number
    '''
    global dict_chisla
    factoriz(x)
    list_delitelei = list(dict_chisla.keys())
    return print('Divisors of the number', x, ':', list_delitelei)
# The largest prime divisor of the number
def max_delitel_chisla(x):
    '''
    :param x: Number under test
    :return: The largest prime divisor of the number
    '''
    global dict_chisla
    factoriz(x)
    list_d = list(dict_chisla.keys())
    list_d.sort(key=lambda i: i, reverse=True)
    return print('The largest prime divisor of the number', x, ':', list_d[0])
# Canonical decomposition of the number
def kanonich_vid(x):
    '''
    :param x: Number under test
    :return: Canonical decomposition of the number
    '''
    global dict_chisla
    list_chisla = []
    tmp=[]
    factoriz(x)
    list_chisla = list(dict_chisla.items())
    print('Canonical form of the number', x, ':')
    print()
    print(x, '= 1', end='')
for i in range (len(list_chisla)):
tmp = list_chisla[i]
print(' *',tmp[0],'^',tmp[1], end='')
print()
return
# The largest divisor (not necessarily prime) of the number.
def naib_del_chisla(x):
    '''
    :param x: Number under test
    :return: The largest divisor of the number
    '''
global dict_chisla
list_chisla = []
tmp = []
tmp1=[]
a = 0
factoriz(x)
list_chisla = list(dict_chisla.items())
for i in range (len(list_chisla)):
tmp = list_chisla[i]
a = tmp[0]**tmp[1]
tmp1.append(a)
tmp1.sort(key=lambda i: i, reverse=True)
    print('The largest divisor of the number', x, ':', tmp1[0])
return
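# --- Demo (an added sketch, not part of the original file) ---
# Quick illustration of the helpers above. dict_chisla is module-level state,
# so it should be cleared between calls for unrelated numbers.
if __name__ == '__main__':
    prostoe_chislo(97)      # reports that 97 is prime
    kanonich_vid(360)       # prints the canonical form 1 * 2^3 * 3^2 * 5^1
    dict_chisla.clear()
    naib_del_chisla(360)    # prints 9, the largest prime-power factor of 360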
|
the-stack_106_30167 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.insert(0, '.')
import os
import os.path as osp
import random
import logging
import time
import argparse
import numpy as np
from tabulate import tabulate
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.data import DataLoader
import torch.cuda.amp as amp
from lib.models import model_factory
from configs import cfg_factory
from lib.cityscapes_cv2 import get_data_loader
from evaluate import eval_model
from lib.ohem_ce_loss import OhemCELoss
from lib.lr_scheduler import WarmupPolyLrScheduler
from lib.meters import TimeMeter, AvgMeter
from lib.logger import setup_logger, print_log_msg
## fix all random seeds
torch.manual_seed(123)
torch.cuda.manual_seed(123)
np.random.seed(123)
random.seed(123)
torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = True
# torch.multiprocessing.set_sharing_strategy('file_system')
def parse_args():
parse = argparse.ArgumentParser()
parse.add_argument('--local_rank', dest='local_rank', type=int, default=-1,)
parse.add_argument('--port', dest='port', type=int, default=44554,)
parse.add_argument('--model', dest='model', type=str, default='bisenetv2',)
parse.add_argument('--finetune-from', type=str, default=None,)
return parse.parse_args()
args = parse_args()
cfg = cfg_factory[args.model]
def set_model():
net = model_factory[cfg.model_type](19)
if not args.finetune_from is None:
net.load_state_dict(torch.load(args.finetune_from, map_location='cpu'))
if cfg.use_sync_bn: net = nn.SyncBatchNorm.convert_sync_batchnorm(net)
net.cuda()
net.train()
criteria_pre = OhemCELoss(0.7)
criteria_aux = [OhemCELoss(0.7) for _ in range(cfg.num_aux_heads)]
return net, criteria_pre, criteria_aux
def set_optimizer(model):
if hasattr(model, 'get_params'):
wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = model.get_params()
params_list = [
{'params': wd_params, },
{'params': nowd_params, 'weight_decay': 0},
{'params': lr_mul_wd_params, 'lr': cfg.lr_start * 10},
{'params': lr_mul_nowd_params, 'weight_decay': 0, 'lr': cfg.lr_start * 10},
]
else:
wd_params, non_wd_params = [], []
for name, param in model.named_parameters():
if param.dim() == 1:
non_wd_params.append(param)
elif param.dim() == 2 or param.dim() == 4:
wd_params.append(param)
params_list = [
{'params': wd_params, },
{'params': non_wd_params, 'weight_decay': 0},
]
optim = torch.optim.SGD(
params_list,
lr=cfg.lr_start,
momentum=0.9,
weight_decay=cfg.weight_decay,
)
return optim
def set_model_dist(net):
local_rank = dist.get_rank()
net = nn.parallel.DistributedDataParallel(
net,
device_ids=[local_rank, ],
# find_unused_parameters=True,
output_device=local_rank)
return net
def set_meters():
time_meter = TimeMeter(cfg.max_iter)
loss_meter = AvgMeter('loss')
loss_pre_meter = AvgMeter('loss_prem')
loss_aux_meters = [AvgMeter('loss_aux{}'.format(i))
for i in range(cfg.num_aux_heads)]
return time_meter, loss_meter, loss_pre_meter, loss_aux_meters
def train():
logger = logging.getLogger()
is_dist = dist.is_initialized()
## dataset
dl = get_data_loader(
cfg.im_root, cfg.train_im_anns,
cfg.ims_per_gpu, cfg.scales, cfg.cropsize,
cfg.max_iter, mode='train', distributed=is_dist)
## model
net, criteria_pre, criteria_aux = set_model()
## optimizer
optim = set_optimizer(net)
## mixed precision training
scaler = amp.GradScaler()
## ddp training
net = set_model_dist(net)
## meters
time_meter, loss_meter, loss_pre_meter, loss_aux_meters = set_meters()
## lr scheduler
lr_schdr = WarmupPolyLrScheduler(optim, power=0.9,
max_iter=cfg.max_iter, warmup_iter=cfg.warmup_iters,
warmup_ratio=0.1, warmup='exp', last_epoch=-1,)
## train loop
for it, (im, lb) in enumerate(dl):
im = im.cuda()
lb = lb.cuda()
lb = torch.squeeze(lb, 1)
optim.zero_grad()
with amp.autocast(enabled=cfg.use_fp16):
logits, *logits_aux = net(im)
loss_pre = criteria_pre(logits, lb)
loss_aux = [crit(lgt, lb) for crit, lgt in zip(criteria_aux, logits_aux)]
loss = loss_pre + sum(loss_aux)
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
torch.cuda.synchronize()
time_meter.update()
loss_meter.update(loss.item())
loss_pre_meter.update(loss_pre.item())
_ = [mter.update(lss.item()) for mter, lss in zip(loss_aux_meters, loss_aux)]
## print training log message
if (it + 1) % 100 == 0:
lr = lr_schdr.get_lr()
lr = sum(lr) / len(lr)
print_log_msg(
it, cfg.max_iter, lr, time_meter, loss_meter,
loss_pre_meter, loss_aux_meters)
lr_schdr.step()
## dump the final model and evaluate the result
save_pth = osp.join(cfg.respth, 'model_final.pth')
logger.info('\nsave models to {}'.format(save_pth))
state = net.module.state_dict()
if dist.get_rank() == 0: torch.save(state, save_pth)
logger.info('\nevaluating the final model')
torch.cuda.empty_cache()
heads, mious = eval_model(net, 2, cfg.im_root, cfg.val_im_anns)
logger.info(tabulate([mious, ], headers=heads, tablefmt='orgtbl'))
return
def main():
torch.cuda.set_device(args.local_rank)
dist.init_process_group(
backend='nccl',
init_method='tcp://127.0.0.1:{}'.format(args.port),
world_size=torch.cuda.device_count(),
rank=args.local_rank
)
if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
setup_logger('{}-train'.format(cfg.model_type), cfg.respth)
train()
if __name__ == "__main__":
main()
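# --- Launch sketch (added; the script path is an assumption about the repo
# layout). world_size is set to torch.cuda.device_count() above, so
# nproc_per_node should match the number of visible GPUs:
#
#   python -m torch.distributed.launch --nproc_per_node=2 tools/train.py \
#       --model bisenetv2 --port 44554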
|
the-stack_106_30168 | from fairness.data.objects.Data import Data
class Adult(Data):
def __init__(self):
Data.__init__(self)
self.dataset_name = 'adult'
self.class_attr = 'income-per-year'
self.positive_class_val = '>50K'
self.sensitive_attrs = ['race', 'sex']
self.privileged_class_names = ['White', 'Male']
self.categorical_features = [ 'workclass', 'education', 'marital-status', 'occupation',
'relationship', 'native-country' ]
self.features_to_keep = [ 'age', 'workclass', 'education', 'education-num', 'marital-status',
'occupation', 'relationship', 'race', 'sex', 'capital-gain',
'capital-loss', 'hours-per-week', 'native-country',
'income-per-year' ]
self.missing_val_indicators = ['?']
|
the-stack_106_30169 | import sys
import datetime as dt
import pytest
import numpy as np
# Construction
class D:
def __index__(self) -> int:
return 0
class C:
def __complex__(self) -> complex:
return 3j
class B:
def __int__(self) -> int:
return 4
class A:
def __float__(self) -> float:
return 4.0
np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")
np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")
np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")
if sys.version_info >= (3, 8):
np.uint64(D())
np.float32(D())
np.complex64(D())
np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
np.bytes_("hello", encoding='utf-8')
np.str_("hello")
np.str_(b"hello", 'utf-8')
np.str_(b"hello", encoding='utf-8')
# Array-ish semantics
np.int8().real
np.int16().imag
np.int32().data
np.int64().flags
np.uint8().itemsize * 2
np.uint16().ndim + 1
np.uint32().strides
np.uint64().shape
# Time structures
np.datetime64()
np.datetime64(0, "D")
np.datetime64(0, b"D")
np.datetime64(0, ('ms', 3))
np.datetime64("2019")
np.datetime64(b"2019")
np.datetime64("2019", "D")
np.datetime64(np.datetime64())
np.datetime64(dt.datetime(2000, 5, 3))
np.datetime64(None)
np.datetime64(None, "D")
np.timedelta64()
np.timedelta64(0)
np.timedelta64(0, "D")
np.timedelta64(0, ('ms', 3))
np.timedelta64(0, b"D")
np.timedelta64("3")
np.timedelta64(b"5")
np.timedelta64(np.timedelta64(2))
np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")
np.void(1)
np.void(np.int64(1))
np.void(True)
np.void(np.bool_(True))
np.void(b"test")
np.void(np.bytes_("test"))
# Protocols
i8 = np.int64()
u8 = np.uint64()
f8 = np.float64()
c16 = np.complex128()
b_ = np.bool_()
td = np.timedelta64()
U = np.str_("1")
S = np.bytes_("1")
AR = np.array(1, dtype=np.float64)
int(i8)
int(u8)
int(f8)
int(b_)
int(td)
int(U)
int(S)
int(AR)
with pytest.warns(np.ComplexWarning):
int(c16)
float(i8)
float(u8)
float(f8)
float(b_)
float(td)
float(U)
float(S)
float(AR)
with pytest.warns(np.ComplexWarning):
float(c16)
complex(i8)
complex(u8)
complex(f8)
complex(c16)
complex(b_)
complex(td)
complex(U)
complex(AR)
|
the-stack_106_30170 | import os
import math
import time
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from experiments.exp_basic import Exp_Basic
from data_process.financial_dataloader import DataLoaderH
from utils.tools import EarlyStopping, adjust_learning_rate, save_model, load_model
from metrics.ETTh_metrics import metric
from utils.math_utils import smooth_l1_loss
from models.SCINet import SCINet
class Exp_financial(Exp_Basic):
def __init__(self, args):
super(Exp_financial, self).__init__(args)
if self.args.L1Loss:
self.criterion = smooth_l1_loss
else:
self.criterion = nn.MSELoss(size_average=False).cuda()
self.evaluateL2 = nn.MSELoss(size_average=False).cuda()
self.evaluateL1 = nn.L1Loss(size_average=False).cuda()
self.writer = SummaryWriter('.exp/run_financial/{}'.format(args.model_name))
def _build_model(self):
if self.args.dataset_name == 'electricity':
self.input_dim = 321
if self.args.dataset_name == 'solar_AL':
self.input_dim = 137
if self.args.dataset_name == 'exchange_rate':
self.input_dim = 8
if self.args.dataset_name == 'traffic':
self.input_dim = 862
model = SCINet(
output_len=self.args.horizon,
input_len=self.args.window_size,
input_dim=self.input_dim,
hid_size=self.args.hidden_size,
num_stacks=self.args.stacks,
num_levels=self.args.levels,
concat_len=self.args.concat_len,
groups=self.args.groups,
kernel=self.args.kernel,
dropout=self.args.dropout,
single_step_output_One=self.args.single_step_output_One,
positionalE=self.args.positionalEcoding,
modified=True,
RIN=self.args.RIN
)
print(model)
return model
def _get_data(self):
if self.args.dataset_name == 'electricity':
self.args.data = './datasets/financial/electricity.txt'
if self.args.dataset_name == 'solar_AL':
self.args.data = './datasets/financial/solar_AL.txt'
if self.args.dataset_name == 'exchange_rate':
self.args.data = './datasets/financial/exchange_rate.txt'
if self.args.dataset_name == 'traffic':
self.args.data = './datasets/financial/traffic.txt'
return DataLoaderH(self.args.data, 0.6, 0.2, self.args.horizon, self.args.window_size, self.args.normalize)
def _select_optimizer(self):
return torch.optim.Adam(params=self.model.parameters(), lr=self.args.lr, betas=(0.9, 0.999), weight_decay=1e-5)
def train(self):
best_val=10000000
optim=self._select_optimizer()
data=self._get_data()
X=data.train[0]
Y=data.train[1]
save_path = os.path.join(self.args.save_path, self.args.model_name)
if not os.path.exists(save_path):
os.makedirs(save_path)
if self.args.resume:
self.model, lr, epoch_start = load_model(self.model, save_path, model_name=self.args.dataset_name, horizon=self.args.horizon)
else:
epoch_start = 0
for epoch in range(epoch_start, self.args.epochs):
epoch_start_time = time.time()
iter = 0
self.model.train()
total_loss = 0
n_samples = 0
final_loss = 0
min_loss = 0
lr = adjust_learning_rate(optim, epoch, self.args)
for tx, ty in data.get_batches(X, Y, self.args.batch_size, True):
self.model.zero_grad() #torch.Size([32, 168, 137])
if self.args.stacks == 1:
forecast = self.model(tx)
elif self.args.stacks == 2:
forecast, res = self.model(tx)
scale = data.scale.expand(forecast.size(0), self.args.horizon, data.m)
bias = data.bias.expand(forecast.size(0), self.args.horizon, data.m)
weight = torch.tensor(self.args.lastWeight).cuda() #used with multi-step
if self.args.single_step: #single step
ty_last = ty[:, -1, :]
scale_last = data.scale.expand(forecast.size(0), data.m)
bias_last = data.bias.expand(forecast.size(0), data.m)
if self.args.normalize == 3:
loss_f = self.criterion(forecast[:, -1], ty_last)
if self.args.stacks == 2:
loss_m = self.criterion(res, ty)/res.shape[1] #average results
else:
loss_f = self.criterion(forecast[:, -1] * scale_last + bias_last, ty_last * scale_last + bias_last)
if self.args.stacks == 2:
loss_m = self.criterion(res * scale + bias, ty * scale + bias)/res.shape[1] #average results
else:
if self.args.normalize == 3:
if self.args.lastWeight == 1.0:
loss_f = self.criterion(forecast, ty)
if self.args.stacks == 2:
loss_m = self.criterion(res, ty)
else:
loss_f = self.criterion(forecast[:, :-1, :], ty[:, :-1, :] ) \
+ weight * self.criterion(forecast[:, -1:, :], ty[:, -1:, :] )
if self.args.stacks == 2:
loss_m = self.criterion(res[:, :-1, :] , ty[:, :-1, :] ) \
+ weight * self.criterion(res[:, -1:, :], ty[:, -1:, :] )
else:
if self.args.lastWeight == 1.0:
loss_f = self.criterion(forecast * scale + bias, ty * scale + bias)
if self.args.stacks == 2:
loss_m = self.criterion(res * scale + bias, ty * scale + bias)
else:
loss_f = self.criterion(forecast[:, :-1, :] * scale[:, :-1, :] + bias[:, :-1, :],
ty[:, :-1, :] * scale[:, :-1, :] + bias[:, :-1, :]) \
+ weight * self.criterion(forecast[:, -1:, :] * scale[:, -1:, :] + bias[:, -1:, :],
ty[:, -1:, :] * scale[:, -1:, :] + bias[:, -1:, :])
if self.args.stacks == 2:
loss_m = self.criterion(res[:, :-1, :] * scale[:, :-1, :] + bias[:, :-1, :],
ty[:, :-1, :] * scale[:, :-1, :] + bias[:, :-1, :]) \
+ weight * self.criterion(res[:, -1:, :] * scale[:, -1:, :] + bias[:, -1:, :],
ty[:, -1:, :] * scale[:, -1:, :] + bias[:, -1:, :])
loss = loss_f
if self.args.stacks == 2:
loss += loss_m
loss.backward()
total_loss += loss.item()
final_loss += loss_f.item()
if self.args.stacks == 2:
min_loss += loss_m.item()
n_samples += (forecast.size(0) * data.m)
grad_norm = optim.step()
if iter%100==0:
if self.args.stacks == 1:
print('iter:{:3d} | loss: {:.7f}'.format(iter, loss.item()/(forecast.size(0) * data.m)))
elif self.args.stacks == 2:
print('iter:{:3d} | loss: {:.7f}, loss_final: {:.7f}, loss_mid: {:.7f}'.format(iter, loss.item()/(forecast.size(0) * data.m),
loss_f.item()/(forecast.size(0) * data.m),loss_m.item()/(forecast.size(0) * data.m)))
iter += 1
if self.args.stacks == 1:
val_loss, val_rae, val_corr = self.validate(data, data.valid[0],data.valid[1])
test_loss, test_rae, test_corr = self.validate(data, data.test[0],data.test[1])
elif self.args.stacks == 2:
val_loss, val_rae, val_corr, val_rse_mid, val_rae_mid, val_correlation_mid=self.validate(data, data.valid[0],data.valid[1])
test_loss, test_rae, test_corr, test_rse_mid, test_rae_mid, test_correlation_mid= self.validate(data, data.test[0],data.test[1])
self.writer.add_scalar('Train_loss_tatal', total_loss / n_samples, global_step=epoch)
self.writer.add_scalar('Train_loss_Final', final_loss / n_samples, global_step=epoch)
self.writer.add_scalar('Validation_final_rse', val_loss, global_step=epoch)
self.writer.add_scalar('Validation_final_rae', val_rae, global_step=epoch)
self.writer.add_scalar('Validation_final_corr', val_corr, global_step=epoch)
self.writer.add_scalar('Test_final_rse', test_loss, global_step=epoch)
self.writer.add_scalar('Test_final_rae', test_rae, global_step=epoch)
self.writer.add_scalar('Test_final_corr', test_corr, global_step=epoch)
if self.args.stacks == 2:
self.writer.add_scalar('Train_loss_Mid', min_loss / n_samples, global_step=epoch)
self.writer.add_scalar('Validation_mid_rse', val_rse_mid, global_step=epoch)
self.writer.add_scalar('Validation_mid_rae', val_rae_mid, global_step=epoch)
self.writer.add_scalar('Validation_mid_corr', val_correlation_mid, global_step=epoch)
self.writer.add_scalar('Test_mid_rse', test_rse_mid, global_step=epoch)
self.writer.add_scalar('Test_mid_rae', test_rae_mid, global_step=epoch)
self.writer.add_scalar('Test_mid_corr', test_correlation_mid, global_step=epoch)
print(
'| EncoDeco: end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae {:5.4f} | valid corr {:5.4f}|'
' test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}'.format(
epoch, (time.time() - epoch_start_time), total_loss / n_samples, val_loss, val_rae, val_corr, test_loss, test_rae, test_corr), flush=True)
if val_loss < best_val:
save_model(epoch, lr, self.model, save_path, model_name=self.args.dataset_name, horizon=self.args.horizon)
print('--------------| Best Val loss |--------------')
best_val = val_loss
return total_loss / n_samples
def validate(self, data, X, Y, evaluate=False):
self.model.eval()
total_loss = 0
total_loss_l1 = 0
total_loss_mid = 0
total_loss_l1_mid = 0
n_samples = 0
predict = None
res_mid = None
test = None
forecast_set = []
Mid_set = []
target_set = []
if evaluate:
save_path = os.path.join(self.args.save_path, self.args.model_name)
self.model = load_model(self.model, save_path, model_name=self.args.dataset_name, horizon=self.args.horizon)[0]
for X, Y in data.get_batches(X, Y, self.args.batch_size, False):
with torch.no_grad():
if self.args.stacks == 1:
forecast = self.model(X)
elif self.args.stacks == 2:
forecast, res = self.model(X) #torch.Size([32, 3, 137])
# only predict the last step
true = Y[:, -1, :].squeeze()
output = forecast[:,-1,:].squeeze()
forecast_set.append(forecast)
target_set.append(Y)
if self.args.stacks == 2:
Mid_set.append(res)
if len(forecast.shape)==1:
forecast = forecast.unsqueeze(dim=0)
if self.args.stacks == 2:
res = res.unsqueeze(dim=0)
if predict is None:
predict = forecast[:,-1,:].squeeze()
test = Y[:,-1,:].squeeze() #torch.Size([32, 3, 137])
if self.args.stacks == 2:
res_mid = res[:,-1,:].squeeze()
else:
predict = torch.cat((predict, forecast[:,-1,:].squeeze()))
test = torch.cat((test, Y[:, -1, :].squeeze()))
if self.args.stacks == 2:
res_mid = torch.cat((res_mid, res[:,-1,:].squeeze()))
scale = data.scale.expand(output.size(0),data.m)
bias = data.bias.expand(output.size(0), data.m)
if self.args.stacks == 2:
output_res = res[:,-1,:].squeeze()
total_loss += self.evaluateL2(output * scale + bias, true * scale+ bias).item()
total_loss_l1 += self.evaluateL1(output * scale+ bias, true * scale+ bias).item()
if self.args.stacks == 2:
total_loss_mid += self.evaluateL2(output_res * scale+ bias, true * scale+ bias).item()
total_loss_l1_mid += self.evaluateL1(output_res * scale+ bias, true * scale+ bias).item()
n_samples += (output.size(0) * data.m)
forecast_Norm = torch.cat(forecast_set, axis=0)
target_Norm = torch.cat(target_set, axis=0)
if self.args.stacks == 2:
Mid_Norm = torch.cat(Mid_set, axis=0)
rse_final_each = []
rae_final_each = []
corr_final_each = []
Scale = data.scale.expand(forecast_Norm.size(0),data.m)
bias = data.bias.expand(forecast_Norm.size(0),data.m)
if not self.args.single_step: #single step
for i in range(forecast_Norm.shape[1]): #get results of each step
lossL2_F = self.evaluateL2(forecast_Norm[:,i,:] * Scale + bias, target_Norm[:,i,:] * Scale+ bias).item()
lossL1_F = self.evaluateL1(forecast_Norm[:,i,:] * Scale+ bias, target_Norm[:,i,:] * Scale+ bias).item()
if self.args.stacks == 2:
lossL2_M = self.evaluateL2(Mid_Norm[:, i, :] * Scale+ bias, target_Norm[:, i, :] * Scale+ bias).item()
lossL1_M = self.evaluateL1(Mid_Norm[:, i, :] * Scale+ bias, target_Norm[:, i, :] * Scale+ bias).item()
rse_F = math.sqrt(lossL2_F / forecast_Norm.shape[0]/ data.m) / data.rse
rae_F = (lossL1_F / forecast_Norm.shape[0]/ data.m) / data.rae
rse_final_each.append(rse_F.item())
rae_final_each.append(rae_F.item())
pred = forecast_Norm[:,i,:].data.cpu().numpy()
y_true = target_Norm[:,i,:].data.cpu().numpy()
sig_p = pred.std(axis=0)
sig_g = y_true.std(axis=0)
m_p = pred.mean(axis=0)
m_g = y_true.mean(axis=0)
ind = (sig_p * sig_g != 0)
corr = ((pred - m_p) * (y_true - m_g)).mean(axis=0) / (sig_p * sig_g)
corr = (corr[ind]).mean()
corr_final_each.append(corr)
rse = math.sqrt(total_loss / n_samples) / data.rse
rae = (total_loss_l1 / n_samples) / data.rae
if self.args.stacks == 2:
rse_mid = math.sqrt(total_loss_mid / n_samples) / data.rse
rae_mid = (total_loss_l1_mid / n_samples) / data.rae
# only calculate the last step for financial datasets.
predict = forecast_Norm.cpu().numpy()[:,-1,:]
Ytest = target_Norm.cpu().numpy()[:,-1,:]
sigma_p = predict.std(axis=0)
sigma_g = Ytest.std(axis=0)
mean_p = predict.mean(axis=0)
mean_g = Ytest.mean(axis=0)
index = (sigma_p * sigma_g != 0)
correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis=0) / (sigma_p * sigma_g)
correlation = (correlation[index]).mean()
if self.args.stacks == 2:
mid_pred = Mid_Norm.cpu().numpy()[:,-1,:]
sigma_mid = mid_pred.std(axis=0)
mean_mid = mid_pred.mean(axis=0)
index_mid = (sigma_mid * sigma_g != 0)
correlation_mid = ((mid_pred - mean_mid) * (Ytest - mean_g)).mean(axis=0) / (sigma_mid * sigma_g)
correlation_mid = (correlation_mid[index_mid]).mean()
print(
'|valid_final rse {:5.4f} | valid_final rae {:5.4f} | valid_final corr {:5.4f}'.format(
rse, rae, correlation), flush=True)
if self.args.stacks == 2:
print(
'|valid_mid rse {:5.4f} | valid_mid rae {:5.4f} | valid_mid corr {:5.4f}'.format(
rse_mid, rae_mid, correlation_mid), flush=True)
if self.args.stacks == 1:
return rse, rae, correlation
if self.args.stacks == 2:
return rse, rae, correlation, rse_mid, rae_mid, correlation_mid
|
the-stack_106_30172 | from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import FormView
from pointtracker.forms import PointTrackerLoginForm
from swesite.contexts.swe_social_context import swe_social
from swesite.contexts.swe_volunteer_context import swe_volunteer
from users.spreadsheet import points_sheet, client, creds
import re
class PointTrackerLoginView(FormView):
template_name = 'pointtracker/pointtracker.html'
form_class = PointTrackerLoginForm
success_url = reverse_lazy('pointtracker')
def get_context_data(self, **kwargs):
context = super(PointTrackerLoginView, self).get_context_data(**kwargs)
context['swe_social'] = swe_social(request=PointTrackerLoginView)
context['swe_volunteer'] = swe_volunteer(request=PointTrackerLoginView)
return context
def form_valid(self, form):
return super(PointTrackerLoginView, self).form_valid(form)
def post(self, request, *args, **kwargs):
# clean data
ulid = request.POST['ulid'].strip().lower()
first_name = request.POST['first_name'].strip().lower()
last_name = request.POST['last_name'].strip().lower()
# re-validate token if it's expired
if creds.access_token_expired:
client.login()
if self.member_exists(ulid=ulid, first_name=first_name, last_name=last_name):
member_row = self.get_member_row(ulid=ulid)
"""
get_all_records() doesn't include the heading,
and puts the first member at 0
member_row is the member's actual row number in the spreadsheet, including the heading,
AND the rows start at 1
so member_row - 2 must be used
if confused, look at the values in debug mode
"""
member_data = points_sheet.get_all_records()[member_row - 2]
"""
(assumes a 0 in the event column means member did not attend event)
"""
events_attended = {
event: points_earned for event, points_earned in member_data.items()
if points_earned != "" and
points_earned != 0 and
isinstance(points_earned, (int, float)) and
event != "total"
}
context = self.get_context_data(**kwargs)
context['member_data'] = member_data
context['events_attended'] = events_attended
return render(request, 'pointtracker/member_point_sheet.html', context=context)
else:
context = self.get_context_data(**kwargs)
return render(request, 'pointtracker/member_not_found.html', context=context)
def member_exists(self, **kwargs):
ulid = kwargs['ulid']
first_name = kwargs['first_name']
last_name = kwargs['last_name']
try:
points_sheet.find(re.compile(f'{ulid}', re.IGNORECASE))
points_sheet.find(re.compile(f'{first_name}', re.IGNORECASE))
points_sheet.find(re.compile(f'{last_name}', re.IGNORECASE))
return True
except:
return False
def get_member_row(self, **kwargs):
return points_sheet.find(kwargs['ulid']).row
|
the-stack_106_30173 | # -*-coding:utf-8-*-
# Crawl high-quality images from the ivsky.com (Tiantang Tupian) image site
import urllib.request as urllib2
import os
import random, re
from bs4 import BeautifulSoup
'''
# The user agent is the first step in the crawler vs. anti-crawler fight
ua_headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
}'''
# User-Agent strings used to fake the HTTP request header
ua_list = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"
]
user_agent=random.choice(ua_list)
# Keyword to crawl; Chinese keywords hit an encoding problem that is still to be fixed
Img_Name='new'
url_pre="http://www.ivsky.com/search.php?q="+Img_Name+"&PageNo="
# Work out the total number of result pages
# Relies on the duplicated requests below to handle the case where the result has fewer than 2 pages
page_count1=0
page_count2=1
while page_count2>page_count1:
request_pre=urllib2.Request(url=url_pre+str(page_count2))
request_pre.add_header('User-Agent',user_agent)
response_pre=urllib2.urlopen(request_pre)
soup_pre=BeautifulSoup(response_pre,"html.parser")
aaa=soup_pre.find_all('div',{'class':'pagelist'})
for a_a in aaa:
a_a_a=a_a.get_text(',')
a_a_a=a_a_a.split(',')
page_count1=int(a_a_a[-2])
if a_a_a[-1]!='下一页':
break
    print('Calculating the total number of pages, reached page %s so far' % page_count1)
request_pre1=urllib2.Request(url=url_pre+str(page_count1))
request_pre1.add_header('User-Agent',user_agent)
response_pre1=urllib2.urlopen(request_pre1)
soup_pre1=BeautifulSoup(response_pre1,"html.parser")
aaa1=soup_pre1.find_all('div',{'class':'pagelist'})
for a_a1 in aaa1:
a_a_a1=a_a1.get_text(',')
a_a_a1=a_a_a1.split(',')
page_count2=int(a_a_a1[-2])
    if a_a_a1[-1]!='下一页':
break
if page_count1>page_count2:
page_count=page_count1
else:
page_count=page_count2
# TODO: a class/function should be used to remove the code duplication above
page_number_s=0
# Total number of image pages; automatic detection still to be improved
#page_count=1
print('Calculation finished: keyword %s has %s pages of images in total' %(Img_Name,page_count))
print('Starting download...')
for p in range(page_count):
page_number_s=page_number_s+1
page_number=str(page_number_s)
    # Build the URL
    url=url_pre+page_number
    # Construct a request object via Request()
    request1=urllib2.Request(url=url)
    # Attach the User-Agent header
    request1.add_header('User-Agent',user_agent)
    # Send the request to the given URL; returns a file-like object with the server response
    response=urllib2.urlopen(request1)
    # The returned file-like object supports the usual Python file-object methods
    #html=response.read()
    #print(html.decode('utf-8'))
    # If you hit encoding errors, try response.encoding=('utf-8', 'ignore')
#.decode('utf-8', 'ignore').replace(u'\xa9', u'')
soup=BeautifulSoup(response,"html.parser")
#for i in soup.find_all('div',{'class':'il_img'}):
img_name=0
for i in soup.find_all('div',{'class':{'il_img',}}):
img_name=img_name+1
for ii in i.find_all('a'):
            # The href can be read directly as an attribute, see https://bbs.csdn.net/topics/392161042?list=lz
url2='http://www.ivsky.com'+ii['href']
request2=urllib2.Request(url=url2)
request2.add_header('User-Agent',user_agent)
response2=urllib2.urlopen(request2)
#response2.encoding=('utf-8', 'ignore')
soup2=BeautifulSoup(response2,"html.parser")
soup22=soup2.find_all('img',{'id':'imgis'})
#url3=soup2.find_all('div',{'class':'bt-green'})
img_url=re.findall('src="+(.*)"', str(soup22))[0]
            # Path used on macOS
            #urllib2.urlretrieve(img_url,'/Users/lhuibin/py/img/%s%s.jpg' % (page_number_s,img_name))
            # Create the img folder if it does not exist
            if 'img' not in os.listdir():
                os.makedirs('img')
            # Path used on Windows 10 Home
            urllib2.urlretrieve(img_url,'img/%s%s.jpg' % (page_number_s,img_name))
            print('Downloading page %s, image %s (%s pages in total)' %(page_number_s,img_name,page_count))
            print('Saved as img/%s%s.jpg' % (page_number_s,img_name))
print("All downloads finished!")
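# --- Regex sketch (added): a self-contained illustration of the src extraction
# used above, run on a made-up tag:
#
#   sample = '<img id="imgis" src="http://img.ivsky.com/img/example.jpg">'
#   re.findall('src="+(.*)"', sample)[0]
#   # -> 'http://img.ivsky.com/img/example.jpg'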
|
the-stack_106_30174 | from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .modules import activations, norm2d
class DoubleConv2d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
norm: str = "identity",
activation: str = "relu",
):
super().__init__()
self.down = nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
padding=1,
padding_mode="zeros",
),
norm2d(norm, num_features=out_channels),
activations[activation],
nn.Conv2d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=3,
padding=1,
padding_mode="zeros",
),
norm2d(norm, num_features=out_channels),
activations[activation],
)
def forward(self, x: Tensor) -> Tensor:
return self.down(x)
class UNetEncoder(nn.Module):
def __init__(
self,
in_channels: int,
norm: str = "identity",
activation: str = "relu",
):
super().__init__()
self.downs = nn.ModuleList()
self.downs.append(
DoubleConv2d(
in_channels=in_channels,
out_channels=64,
norm=norm,
activation=activation,
)
)
self.downs.append(
DoubleConv2d(
in_channels=64,
out_channels=128,
norm=norm,
activation=activation,
)
)
self.downs.append(
DoubleConv2d(
in_channels=128,
out_channels=256,
norm=norm,
activation=activation,
)
)
self.downs.append(
DoubleConv2d(
in_channels=256,
out_channels=512,
norm=norm,
activation=activation,
)
)
self.depth = len(self.downs)
def forward(self, x) -> Tuple[Tensor, List[Tensor]]:
downs_x = []
for i, layer in enumerate(self.downs):
x = layer(x)
if i < self.depth - 1:
downs_x.append(x)
x = F.max_pool2d(x, kernel_size=2, stride=2)
return x, downs_x
class UpSample(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
norm: str = "identity",
activation: str = "relu",
):
super().__init__()
self.up = nn.ModuleList(
[
nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=2,
stride=2,
padding=0,
),
DoubleConv2d(
in_channels=in_channels,
out_channels=out_channels,
norm=norm,
activation=activation,
),
]
)
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
x1 = self.up[0](x1)
x1 = self.up[1](torch.cat([x1, x2], dim=1))
return x1
class UNetDecoder(nn.Module):
def __init__(self, norm="identity", activation="relu"):
super().__init__()
self.ups = nn.ModuleList()
self.ups.append(
UpSample(
in_channels=512,
out_channels=256,
norm=norm,
activation=activation,
)
)
self.ups.append(
UpSample(
in_channels=256,
out_channels=128,
norm=norm,
activation=activation,
)
)
self.ups.append(
UpSample(
in_channels=128,
out_channels=64,
norm=norm,
activation=activation,
)
)
self.depth = len(self.ups)
def forward(self, x1: Tensor, x2: List[Tensor]) -> torch.Tensor:
for i, layer in enumerate(self.ups):
x1 = layer(x1, x2[-(i + 1)])
return x1
class UNet(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
norm: str = "identity",
activation: str = "relu",
final_act: str = "tanh",
):
super().__init__()
self.encoder = UNetEncoder(
in_channels=in_channels, norm=norm, activation=activation
)
self.decoder = UNetDecoder(norm=norm, activation=activation)
self.out = nn.Sequential(
nn.Conv2d(
in_channels=64,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
),
activations[final_act],
)
def forward(self, x) -> Tensor:
x1, x2 = self.encoder(x)
x = self.decoder(x1, x2)
x = self.out(x)
return x
|
the-stack_106_30175 | """operating system relevant deploy functions
author: Andreas Poehlmann
"""
import argparse
import ctypes
import logging
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
from textwrap import dedent
from builtins import input, str
from future.standard_library import hooks
with hooks():
from urllib.request import urlopen
from urllib.error import HTTPError
try:
# noinspection PyProtectedMember
from textwrap import indent as _indent
except ImportError:
# noinspection PyUnusedLocal
def _indent(text, prefix, predicate=None):
return u"".join(prefix + line for line in text.splitlines(True))
_GITHUB_REPO_URL = 'https://raw.githubusercontent.com/ap--/python-seabreeze/master/os_support'
_UDEV_RULES_PATH = '/etc/udev/rules.d/10-oceanoptics.rules'
_DRIVERS_ZIP_FN = 'windows-driver-files.zip'
_log = logging.getLogger(__name__)
def _diff_files(file1, file2):
"""diff two files using linux `diff`"""
try:
return subprocess.check_output(['diff', file1, file2]).decode('utf8')
except subprocess.CalledProcessError as err:
return err.output.decode('utf8')
def _request_confirmation(question):
"""require user input to continue"""
while True:
user_input = input('{} [y/n] '.format(question)).lower()
if user_input not in {'y', 'n'}:
_log.info("Please enter 'y' or 'n'.")
elif user_input[0] == 'n':
return False
else:
return True
time.sleep(0.1)
def linux_install_udev_rules():
"""verify and install the udev rules"""
parser = argparse.ArgumentParser()
parser.add_argument('--overwrite-existing', help="overwrite rules if already present", action='store_true')
parser.add_argument('rules', help="rules file (default: download from github)", default='', nargs='?')
args = parser.parse_args()
if args.rules:
if not os.path.exists(args.rules):
raise IOError("rules file '{}' doesn't exist".format(args.rules))
udev_tmp_file = None
udev_fn = args.rules
else:
udev_tmp_file = tempfile.NamedTemporaryFile()
udev_fn = udev_tmp_file.name
try:
# download rules from github if no file is provided
if udev_tmp_file is not None:
url = '{}/{}'.format(_GITHUB_REPO_URL, os.path.basename(_UDEV_RULES_PATH))
try:
_log.info("downloading rules from github")
udev_data = urlopen(url).read()
except HTTPError:
_log.error("can't download '{}'".format(url))
sys.exit(1)
udev_tmp_file.write(udev_data)
udev_tmp_file.flush()
# check if rules need to be overwritten
if os.path.exists(_UDEV_RULES_PATH) and not args.overwrite_existing:
rules_differ = _diff_files(_UDEV_RULES_PATH, udev_fn)
if not rules_differ:
_log.info("udev rules already newest version")
sys.exit(0)
else:
_log.info(_indent(rules_differ, u' ').rstrip())
_log.info("udev rules differ. To overwrite run with '--overwrite-existing'")
sys.exit(1)
if not _request_confirmation("Install udev rules?"):
sys.exit(0)
# cp rules and execute
_log.info('Copying udev rules to {}'.format(_UDEV_RULES_PATH))
subprocess.call(['sudo', 'cp', udev_fn, _UDEV_RULES_PATH])
_log.info('Calling udevadm control --reload-rules')
subprocess.call(['sudo', 'udevadm', 'control', '--reload-rules'])
_log.info('Success')
sys.exit(0)
finally:
if udev_tmp_file is not None:
udev_tmp_file.close() # removes tempfile
def _windows_is_admin():
"""windows only: check if running as admin"""
# noinspection PyBroadException
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except Exception:
return False
def _is_contained_in_dir(files, cdir=None):
cdir = os.path.abspath(cdir or os.path.curdir)
for f in files:
f_abs = os.path.abspath(f)
if not os.path.commonprefix((f_abs, cdir)).startswith(cdir):
return False
return True
def _unicode(x):
try:
return unicode(x)
except NameError:
return x
def windows_install_drivers():
"""install driver inf files via pnputil in an elevated shell"""
if not _request_confirmation("Install windows drivers?"):
sys.exit(0)
if not _windows_is_admin():
# Re-run the program with admin rights
argv = [__file__] + sys.argv[1:]
ret = ctypes.windll.shell32.ShellExecuteW(None,
_unicode("runas"),
_unicode(sys.executable),
_unicode(subprocess.list2cmdline(argv)),
None,
1)
if ret > 32:
_log.info('Launched admin shell')
else:
_log.info('Failed to launch admin shell. Error code {}'.format(ret))
sys.exit(0 if ret > 32 else 1)
# running as admin
parser = argparse.ArgumentParser()
parser.add_argument('drivers_zip', help="drivers zip file (default: download from github)", default='', nargs='?')
args = parser.parse_args()
if args.drivers_zip:
if not os.path.exists(args.drivers_zip):
raise IOError("drivers_zip file '{}' doesn't exist".format(args.drivers_zip))
drivers_zip = args.drivers_zip
else:
drivers_zip = None
tmp_dir = tempfile.mkdtemp(prefix='seabreeze-os-')
# noinspection PyBroadException
try:
# download driver files
if drivers_zip is None:
url = '{}/{}'.format(_GITHUB_REPO_URL, os.path.basename(_DRIVERS_ZIP_FN))
drivers_zip = os.path.join(tmp_dir, _DRIVERS_ZIP_FN)
with open(drivers_zip, 'wb') as dzip:
try:
_log.info("Downloading windows drivers from github")
drivers_zip_data = urlopen(url).read()
except HTTPError:
_log.error("Can't download '{}'".format(url))
sys.exit(1)
dzip.write(drivers_zip_data)
# extract driver files
with zipfile.ZipFile(drivers_zip, 'r') as dzip:
if not _is_contained_in_dir(dzip.namelist()):
raise Exception("Zipfile contains non subdir paths")
dzip.extractall(tmp_dir)
_log.info("Extracted to temporary directory {}".format(tmp_dir))
# use correct pnputil with 32bit pythons
if '32bit' in platform.architecture():
pnputil = r'%systemroot%\Sysnative\pnputil.exe'
else:
pnputil = 'pnputil.exe'
# install with pnp util
cmd = [pnputil, '-i', '-a', os.path.join(tmp_dir, '*.inf')]
return_code = subprocess.call(cmd, shell=True)
        _log.warning(dedent("""\
Note: Some of the drivers currently don't have valid signatures.
Look at the output above. If the spectrometer you want to use only
provides an unsigned driver, you might have to install it manually.
If you encounter this issue, please report it on github."""))
if return_code == 0:
_log.info("Success")
elif return_code == 3010:
_log.info("Success! REBOOT REQUIRED!")
else:
_log.error("pnputil returned with {}".format(return_code))
except Exception:
_log.error("Error when installing drivers")
finally:
shutil.rmtree(tmp_dir)
input('Press [enter] to close.')
def main():
logging.basicConfig(level=logging.INFO, format='%(message)s')
system = platform.system()
if system == "Windows":
windows_install_drivers()
elif system == "Linux":
linux_install_udev_rules()
else:
_log.info("Nothing to do for system '{}'".format(system))
sys.exit(0)
if __name__ == "__main__":
# fix for windows entry_point shims, which are actually .exe files...
main()
|
the-stack_106_30177 |
# coding: utf-8
# In[ ]:
class Solution:
# @param a list of integers
# @return an integer
    # Algorithm idea:
    # step1: create two indices, read and write, which both traverse the whole array
    # step2: they move differently - write stops when it hits a duplicate, read does not
    # step3: once write stops, it waits until read has walked past the duplicated run before advancing
    # step4: when the current element differs from the previous one, A[write] = A[read]
    # (a concrete run is sketched after the class)
def removeDuplicates(self, A):
if not A:
return 0
end = len(A)
read = 1
write = 1
while read < end:
if A[read] != A[read-1]:
A[write] = A[read]
write += 1
read += 1
return write
# Note!!!: the problem requires not allocating any new space; this solution only operates on the original array A,
# but I am not completely sure whether it ends up using any new memory
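# Usage sketch (illustrative only, not part of the original solution):
#   nums = [1, 1, 2, 2, 3]
#   new_length = Solution().removeDuplicates(nums)
#   # new_length == 3 and nums[:new_length] == [1, 2, 3]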
|
the-stack_106_30178 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component.dataio import DataIO
from pipeline.component.homo_secureboost import HomoSecureBoost
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from pipeline.utils.tools import load_job_config
from pipeline.runtime.entity import JobParameters
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
backend = config.backend
work_mode = config.work_mode
guest_train_data = {"name": "vehicle_scale_homo_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "vehicle_scale_homo_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
dataio_0 = DataIO(name="dataio_0")
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
dataio_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True, output_format="dense")
dataio_0.get_party_instance(role='host', party_id=host).component_param(with_label=True, output_format="dense")
homo_secureboost_0 = HomoSecureBoost(name="homo_secureboost_0",
num_trees=3,
task_type='classification',
objective_param={"objective": "cross_entropy"},
tree_param={
"max_depth": 3
},
cv_param={
"need_cv": True,
"shuffle": False,
"n_splits": 5
}
)
pipeline.add_component(reader_0)
pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
pipeline.add_component(homo_secureboost_0, data=Data(train_data=dataio_0.output.data))
pipeline.compile()
job_parameters = JobParameters(backend=backend, work_mode=work_mode)
pipeline.fit(job_parameters)
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
|
the-stack_106_30181 | import math
from django import template
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
register = template.Library()
def _review_score_number(context, score):
if score is None:
return "×"
score = round(score, 1)
if not context:
return str(score)
max_score = context["request"].event.settings.get("review_max_score")
if isinstance(score, int) or (isinstance(score, float) and score.is_integer()):
score = int(score)
tooltip = (
context["request"].event.settings.get(f"review_score_name_{score}") or ""
)
if tooltip:
tooltip = f"'{tooltip}'"
else:
lower_bound = math.floor(score)
lower = context["request"].event.settings.get(
f"review_score_name_{lower_bound}"
)
upper = context["request"].event.settings.get(
f"review_score_name_{lower_bound + 1}"
)
tooltip = _("Between '{lower}' and '{upper}'.").format(lower=lower, upper=upper)
result = f"{score}/{max_score}"
if not tooltip:
return result
return format_html(
'<span data-toggle="tooltip" title="{}">{}</span>', tooltip, result
)
def _review_score_override(positive_overrides, negative_overrides):
result = ""
if positive_overrides:
result += '<i class="fa fa-arrow-circle-up override text-success"></i>'
if positive_overrides > 1 or negative_overrides:
result += f" {positive_overrides}"
if negative_overrides:
result += '<i class="fa fa-arrow-circle-down override text-danger"></i>'
if negative_overrides > 1 or positive_overrides:
result += f" {negative_overrides}"
return result
@register.simple_tag(takes_context=True)
def review_score(context, submission, user_score=False):
score = submission.current_score if not user_score else submission.user_score
if score is None:
return "-"
if hasattr(submission, "has_override") and not submission.has_override:
return _review_score_number(context, score)
positive_overrides = submission.reviews.filter(override_vote=True).count()
negative_overrides = submission.reviews.filter(override_vote=False).count()
if positive_overrides or negative_overrides:
return mark_safe(_review_score_override(positive_overrides, negative_overrides))
return _review_score_number(context, score)
|
the-stack_106_30185 | #!/usr/bin/env python3
import importlib
import inspect
import json
import os
import re
import shutil
import subprocess
import sys
import textwrap
import traceback
from pathlib import Path
from typing import Any
from unittest.mock import patch
from ansi2html import Ansi2HTMLConverter
from devtools import PrettyFormat
THIS_DIR = Path(__file__).parent
DOCS_DIR = (THIS_DIR / '..').resolve()
EXAMPLES_DIR = DOCS_DIR / 'examples'
TMP_EXAMPLES_DIR = DOCS_DIR / '.tmp_examples'
MAX_LINE_LENGTH = int(re.search(r'max_line_length = (\d+)', (EXAMPLES_DIR / '.editorconfig').read_text()).group(1))
LONG_LINE = 50
pformat = PrettyFormat(simple_cutoff=LONG_LINE)
def to_string(value: Any) -> str:
# attempt to build a pretty equivalent of the print output
if isinstance(value, (dict, list, tuple, set)):
return pformat(value)
elif isinstance(value, str) and any(re.fullmatch(r, value, flags=re.DOTALL) for r in ['{".+}', r'\[.+\]']):
try:
obj = json.loads(value)
except ValueError:
# not JSON, not a problem
pass
else:
s = json.dumps(obj)
if len(s) > LONG_LINE:
                return json.dumps(obj, indent=2)
else:
return s
return str(value)
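# Illustrative behaviour of to_string (a sketch, not part of the original script):
#   to_string(5)          -> '5'
#   to_string('{"a": 1}') -> '{"a": 1}'        (round-tripped through json)
#   to_string({"a": 1})   -> pformat({"a": 1})  (devtools pretty output)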
class MockPrint:
def __init__(self, file: Path):
self.file = file
self.statements = []
def __call__(self, *args, file=None, flush=None):
frame = inspect.currentframe().f_back.f_back.f_back
if not self.file.samefile(frame.f_code.co_filename):
# happens when index_error.py imports index_main.py
return
s = ' '.join(map(to_string, args))
lines = []
for line in s.split('\n'):
if len(line) > MAX_LINE_LENGTH - 3:
lines += textwrap.wrap(line, width=MAX_LINE_LENGTH - 3)
else:
lines.append(line)
self.statements.append((frame.f_lineno, lines))
def all_md_contents() -> str:
file_contents = []
for f in DOCS_DIR.glob('**/*.md'):
file_contents.append(f.read_text())
return '\n\n\n'.join(file_contents)
def gen_ansi_output():
conv = Ansi2HTMLConverter()
input_file = EXAMPLES_DIR / 'devtools_main.py'
os.environ['PY_DEVTOOLS_HIGHLIGHT'] = 'true'
p = subprocess.run((sys.executable, str(input_file)), stdout=subprocess.PIPE, check=True, encoding='utf8')
html = conv.convert(p.stdout, full=False).strip('\r\n')
full_html = f'<div class="terminal">\n<pre class="terminal-content">\n{html}\n</pre>\n</div>'
path = TMP_EXAMPLES_DIR / f'{input_file.stem}.html'
path.write_text(full_html)
print(f'generated ansi output to {path}')
def exec_examples():
errors = []
all_md = all_md_contents()
new_files = {}
os.environ.update({'my_auth_key': 'xxx', 'my_api_key': 'xxx'})
sys.path.append(str(EXAMPLES_DIR))
for file in sorted(EXAMPLES_DIR.iterdir()):
def error(desc: str):
errors.append((file, desc))
sys.stderr.write(f'error in {file.name}: {desc}\n')
if not file.is_file():
# __pycache__, maybe others
continue
if file.suffix != '.py':
# just copy
new_files[file.name] = file.read_text()
continue
if f'{{!.tmp_examples/{file.name}!}}' not in all_md:
error('file not used anywhere')
file_text = file.read_text('utf-8')
if '\n\n\n' in file_text:
error('too many new lines')
if not file_text.endswith('\n'):
error('no trailing new line')
if re.search('^ *# *>', file_text, flags=re.M):
error('contains comments with print output, please remove')
dont_execute_re = re.compile(r'^# dont-execute\n', flags=re.M)
if dont_execute_re.search(file_text):
lines = dont_execute_re.sub('', file_text).split('\n')
else:
no_print_intercept_re = re.compile(r'^# no-print-intercept\n', flags=re.M)
no_print_intercept = bool(no_print_intercept_re.search(file_text))
if no_print_intercept:
file_text = no_print_intercept_re.sub('', file_text)
mp = MockPrint(file)
mod = None
with patch('builtins.print') as mock_print:
if not no_print_intercept:
mock_print.side_effect = mp
try:
mod = importlib.import_module(file.stem)
except Exception:
tb = traceback.format_exception(*sys.exc_info())
error(''.join(e for e in tb if '/pydantic/docs/examples/' in e or not e.startswith(' File ')))
if mod and not mod.__file__.startswith(str(EXAMPLES_DIR)):
error(f'module path "{mod.__file__}" not inside "{EXAMPLES_DIR}", name may shadow another module?')
lines = file_text.split('\n')
to_json_line = '# output-json'
if to_json_line in lines:
lines = [line for line in lines if line != to_json_line]
if len(mp.statements) != 1:
error('should only have one print statement')
new_files[file.stem + '.json'] = '\n'.join(mp.statements[0][1]) + '\n'
else:
for line_no, print_lines in reversed(mp.statements):
if len(print_lines) > 2:
text = '"""\n{}\n"""'.format('\n'.join(print_lines))
else:
text = '\n'.join('#> ' + l for l in print_lines)
lines.insert(line_no, text)
try:
ignore_above = lines.index('# ignore-above')
except ValueError:
pass
else:
lines = lines[ignore_above + 1 :]
lines = '\n'.join(lines).split('\n')
if any(len(l) > MAX_LINE_LENGTH for l in lines):
error(f'lines longer than {MAX_LINE_LENGTH} characters')
new_files[file.name] = '\n'.join(lines)
if errors:
print(f'\n{len(errors)} errors, not writing files\n')
return 1
if TMP_EXAMPLES_DIR.exists():
shutil.rmtree(TMP_EXAMPLES_DIR)
print(f'writing {len(new_files)} example files to {TMP_EXAMPLES_DIR}')
TMP_EXAMPLES_DIR.mkdir()
for file_name, content in new_files.items():
(TMP_EXAMPLES_DIR / file_name).write_text(content, 'utf-8')
gen_ansi_output()
return 0
if __name__ == '__main__':
sys.exit(exec_examples())
|
the-stack_106_30186 | from __future__ import absolute_import
import sys
from argparse import ArgumentParser
import pytest
from configargparse import Namespace
from pytest_mock.plugin import MockerFixture
from snakebids.admin import gen_parser
@pytest.fixture
def parser():
return gen_parser()
class TestAdminCli:
def test_fails_if_no_subcommand(
self, parser: ArgumentParser, mocker: MockerFixture
):
mocker.patch.object(sys, "argv", ["snakebids"])
with pytest.raises(SystemExit):
parser.parse_args()
def test_fails_if_invalid_subcommand(
self, parser: ArgumentParser, mocker: MockerFixture
):
mocker.patch.object(sys, "argv", ["snakebids", "dummy"])
with pytest.raises(SystemExit):
parser.parse_args()
def test_create_succeeds(self, parser: ArgumentParser, mocker: MockerFixture):
mocker.patch.object(sys, "argv", ["snakebids", "create"])
assert isinstance(parser.parse_args(), Namespace)
def test_boutiques_succeeds(self, parser: ArgumentParser, mocker: MockerFixture):
mocker.patch.object(sys, "argv", ["snakebids", "boutiques", "test.json"])
assert isinstance(parser.parse_args(), Namespace)
|
the-stack_106_30187 | """Mapping registries for Zigbee Home Automation."""
from __future__ import annotations
import collections
from typing import Callable, Dict
import attr
from zigpy import zcl
import zigpy.profiles.zha
import zigpy.profiles.zll
from homeassistant.components.alarm_control_panel import DOMAIN as ALARM
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.number import DOMAIN as NUMBER
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
# importing channels updates registries
from . import channels as zha_channels # noqa: F401 pylint: disable=unused-import
from .decorators import CALLABLE_T, DictRegistry, SetRegistry
from .typing import ChannelType
GROUP_ENTITY_DOMAINS = [LIGHT, SWITCH, FAN]
PHILLIPS_REMOTE_CLUSTER = 0xFC00
SMARTTHINGS_ACCELERATION_CLUSTER = 0xFC02
SMARTTHINGS_ARRIVAL_SENSOR_DEVICE_TYPE = 0x8000
SMARTTHINGS_HUMIDITY_CLUSTER = 0xFC45
VOC_LEVEL_CLUSTER = 0x042E
REMOTE_DEVICE_TYPES = {
zigpy.profiles.zha.PROFILE_ID: [
zigpy.profiles.zha.DeviceType.COLOR_CONTROLLER,
zigpy.profiles.zha.DeviceType.COLOR_DIMMER_SWITCH,
zigpy.profiles.zha.DeviceType.COLOR_SCENE_CONTROLLER,
zigpy.profiles.zha.DeviceType.DIMMER_SWITCH,
zigpy.profiles.zha.DeviceType.LEVEL_CONTROL_SWITCH,
zigpy.profiles.zha.DeviceType.NON_COLOR_CONTROLLER,
zigpy.profiles.zha.DeviceType.NON_COLOR_SCENE_CONTROLLER,
zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT_SWITCH,
zigpy.profiles.zha.DeviceType.REMOTE_CONTROL,
zigpy.profiles.zha.DeviceType.SCENE_SELECTOR,
],
zigpy.profiles.zll.PROFILE_ID: [
zigpy.profiles.zll.DeviceType.COLOR_CONTROLLER,
zigpy.profiles.zll.DeviceType.COLOR_SCENE_CONTROLLER,
zigpy.profiles.zll.DeviceType.CONTROL_BRIDGE,
zigpy.profiles.zll.DeviceType.CONTROLLER,
zigpy.profiles.zll.DeviceType.SCENE_CONTROLLER,
],
}
REMOTE_DEVICE_TYPES = collections.defaultdict(list, REMOTE_DEVICE_TYPES)
SINGLE_INPUT_CLUSTER_DEVICE_CLASS = {
# this works for now but if we hit conflicts we can break it out to
# a different dict that is keyed by manufacturer
SMARTTHINGS_ACCELERATION_CLUSTER: BINARY_SENSOR,
SMARTTHINGS_HUMIDITY_CLUSTER: SENSOR,
VOC_LEVEL_CLUSTER: SENSOR,
zcl.clusters.closures.DoorLock.cluster_id: LOCK,
zcl.clusters.closures.WindowCovering.cluster_id: COVER,
zcl.clusters.general.AnalogInput.cluster_id: SENSOR,
zcl.clusters.general.AnalogOutput.cluster_id: NUMBER,
zcl.clusters.general.MultistateInput.cluster_id: SENSOR,
zcl.clusters.general.OnOff.cluster_id: SWITCH,
zcl.clusters.general.PowerConfiguration.cluster_id: SENSOR,
zcl.clusters.homeautomation.ElectricalMeasurement.cluster_id: SENSOR,
zcl.clusters.hvac.Fan.cluster_id: FAN,
zcl.clusters.measurement.CarbonDioxideConcentration.cluster_id: SENSOR,
zcl.clusters.measurement.CarbonMonoxideConcentration.cluster_id: SENSOR,
zcl.clusters.measurement.FormaldehydeConcentration.cluster_id: SENSOR,
zcl.clusters.measurement.IlluminanceMeasurement.cluster_id: SENSOR,
zcl.clusters.measurement.OccupancySensing.cluster_id: BINARY_SENSOR,
zcl.clusters.measurement.PressureMeasurement.cluster_id: SENSOR,
zcl.clusters.measurement.RelativeHumidity.cluster_id: SENSOR,
zcl.clusters.measurement.TemperatureMeasurement.cluster_id: SENSOR,
zcl.clusters.security.IasZone.cluster_id: BINARY_SENSOR,
zcl.clusters.smartenergy.Metering.cluster_id: SENSOR,
}
SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS = {
zcl.clusters.general.OnOff.cluster_id: BINARY_SENSOR,
zcl.clusters.security.IasAce.cluster_id: ALARM,
}
BINDABLE_CLUSTERS = SetRegistry()
CHANNEL_ONLY_CLUSTERS = SetRegistry()
DEVICE_CLASS = {
zigpy.profiles.zha.PROFILE_ID: {
SMARTTHINGS_ARRIVAL_SENSOR_DEVICE_TYPE: DEVICE_TRACKER,
zigpy.profiles.zha.DeviceType.THERMOSTAT: CLIMATE,
zigpy.profiles.zha.DeviceType.COLOR_DIMMABLE_LIGHT: LIGHT,
zigpy.profiles.zha.DeviceType.COLOR_TEMPERATURE_LIGHT: LIGHT,
zigpy.profiles.zha.DeviceType.DIMMABLE_BALLAST: LIGHT,
zigpy.profiles.zha.DeviceType.DIMMABLE_LIGHT: LIGHT,
zigpy.profiles.zha.DeviceType.DIMMABLE_PLUG_IN_UNIT: LIGHT,
zigpy.profiles.zha.DeviceType.EXTENDED_COLOR_LIGHT: LIGHT,
zigpy.profiles.zha.DeviceType.LEVEL_CONTROLLABLE_OUTPUT: COVER,
zigpy.profiles.zha.DeviceType.ON_OFF_BALLAST: SWITCH,
zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT: LIGHT,
zigpy.profiles.zha.DeviceType.ON_OFF_PLUG_IN_UNIT: SWITCH,
zigpy.profiles.zha.DeviceType.SHADE: COVER,
zigpy.profiles.zha.DeviceType.SMART_PLUG: SWITCH,
zigpy.profiles.zha.DeviceType.IAS_ANCILLARY_CONTROL: ALARM,
},
zigpy.profiles.zll.PROFILE_ID: {
zigpy.profiles.zll.DeviceType.COLOR_LIGHT: LIGHT,
zigpy.profiles.zll.DeviceType.COLOR_TEMPERATURE_LIGHT: LIGHT,
zigpy.profiles.zll.DeviceType.DIMMABLE_LIGHT: LIGHT,
zigpy.profiles.zll.DeviceType.DIMMABLE_PLUGIN_UNIT: LIGHT,
zigpy.profiles.zll.DeviceType.EXTENDED_COLOR_LIGHT: LIGHT,
zigpy.profiles.zll.DeviceType.ON_OFF_LIGHT: LIGHT,
zigpy.profiles.zll.DeviceType.ON_OFF_PLUGIN_UNIT: SWITCH,
},
}
DEVICE_CLASS = collections.defaultdict(dict, DEVICE_CLASS)
CLIENT_CHANNELS_REGISTRY = DictRegistry()
ZIGBEE_CHANNEL_REGISTRY = DictRegistry()
def set_or_callable(value):
"""Convert single str or None to a set. Pass through callables and sets."""
if value is None:
return frozenset()
if callable(value):
return value
if isinstance(value, (frozenset, set, list)):
return frozenset(value)
return frozenset([str(value)])
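# Illustrative behaviour of set_or_callable (a sketch, not part of the original module):
#   set_or_callable(None)            -> frozenset()
#   set_or_callable("on_off")        -> frozenset({"on_off"})
#   set_or_callable({"a", "b"})      -> frozenset({"a", "b"})
#   set_or_callable(lambda m: True)  -> the callable itself, unchanged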
@attr.s(frozen=True)
class MatchRule:
"""Match a ZHA Entity to a channel name or generic id."""
channel_names: Callable | set[str] | str = attr.ib(
factory=frozenset, converter=set_or_callable
)
generic_ids: Callable | set[str] | str = attr.ib(
factory=frozenset, converter=set_or_callable
)
manufacturers: Callable | set[str] | str = attr.ib(
factory=frozenset, converter=set_or_callable
)
models: Callable | set[str] | str = attr.ib(
factory=frozenset, converter=set_or_callable
)
aux_channels: Callable | set[str] | str = attr.ib(
factory=frozenset, converter=set_or_callable
)
@property
def weight(self) -> int:
"""Return the weight of the matching rule.
Most specific matches should be preferred over less specific. Model matching
rules have a priority over manufacturer matching rules and rules matching a
single model/manufacturer get a better priority over rules matching multiple
models/manufacturers. And any model or manufacturers matching rules get better
priority over rules matching only channels.
But in case of a channel name/channel id matching, we give rules matching
multiple channels a better priority over rules matching a single channel.
"""
weight = 0
if self.models:
weight += 401 - (1 if callable(self.models) else len(self.models))
if self.manufacturers:
weight += 301 - (
1 if callable(self.manufacturers) else len(self.manufacturers)
)
weight += 10 * len(self.channel_names)
weight += 5 * len(self.generic_ids)
weight += 1 * len(self.aux_channels)
return weight
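    # Worked example of the weighting above (illustrative values only, not part
    # of the original module):
    #   MatchRule(models="some_model").weight                -> 401 - 1 = 400
    #   MatchRule(manufacturers={"a", "b"}).weight           -> 301 - 2 = 299
    #   MatchRule(channel_names={"on_off", "level"}).weight  -> 2 * 10 = 20
    # so a model rule outranks a manufacturer rule, which outranks a
    # channels-only rule.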
def claim_channels(self, channel_pool: list[ChannelType]) -> list[ChannelType]:
"""Return a list of channels this rule matches + aux channels."""
claimed = []
if isinstance(self.channel_names, frozenset):
claimed.extend([ch for ch in channel_pool if ch.name in self.channel_names])
if isinstance(self.generic_ids, frozenset):
claimed.extend(
[ch for ch in channel_pool if ch.generic_id in self.generic_ids]
)
if isinstance(self.aux_channels, frozenset):
claimed.extend([ch for ch in channel_pool if ch.name in self.aux_channels])
return claimed
def strict_matched(self, manufacturer: str, model: str, channels: list) -> bool:
"""Return True if this device matches the criteria."""
return all(self._matched(manufacturer, model, channels))
def loose_matched(self, manufacturer: str, model: str, channels: list) -> bool:
"""Return True if this device matches the criteria."""
return any(self._matched(manufacturer, model, channels))
def _matched(self, manufacturer: str, model: str, channels: list) -> list:
"""Return a list of field matches."""
if not any(attr.asdict(self).values()):
return [False]
matches = []
if self.channel_names:
channel_names = {ch.name for ch in channels}
matches.append(self.channel_names.issubset(channel_names))
if self.generic_ids:
all_generic_ids = {ch.generic_id for ch in channels}
matches.append(self.generic_ids.issubset(all_generic_ids))
if self.manufacturers:
if callable(self.manufacturers):
matches.append(self.manufacturers(manufacturer))
else:
matches.append(manufacturer in self.manufacturers)
if self.models:
if callable(self.models):
matches.append(self.models(model))
else:
matches.append(model in self.models)
return matches
RegistryDictType = Dict[str, Dict[MatchRule, CALLABLE_T]]
GroupRegistryDictType = Dict[str, CALLABLE_T]
class ZHAEntityRegistry:
"""Channel to ZHA Entity mapping."""
def __init__(self):
"""Initialize Registry instance."""
self._strict_registry: RegistryDictType = collections.defaultdict(dict)
self._loose_registry: RegistryDictType = collections.defaultdict(dict)
self._group_registry: GroupRegistryDictType = {}
def get_entity(
self,
component: str,
manufacturer: str,
model: str,
channels: list[ChannelType],
default: CALLABLE_T = None,
) -> tuple[CALLABLE_T, list[ChannelType]]:
"""Match a ZHA Channels to a ZHA Entity class."""
matches = self._strict_registry[component]
for match in sorted(matches, key=lambda x: x.weight, reverse=True):
if match.strict_matched(manufacturer, model, channels):
claimed = match.claim_channels(channels)
return self._strict_registry[component][match], claimed
return default, []
def get_group_entity(self, component: str) -> CALLABLE_T:
"""Match a ZHA group to a ZHA Entity class."""
return self._group_registry.get(component)
def strict_match(
self,
component: str,
channel_names: Callable | set[str] | str = None,
generic_ids: Callable | set[str] | str = None,
manufacturers: Callable | set[str] | str = None,
models: Callable | set[str] | str = None,
aux_channels: Callable | set[str] | str = None,
) -> Callable[[CALLABLE_T], CALLABLE_T]:
"""Decorate a strict match rule."""
rule = MatchRule(
channel_names, generic_ids, manufacturers, models, aux_channels
)
def decorator(zha_ent: CALLABLE_T) -> CALLABLE_T:
"""Register a strict match rule.
All non empty fields of a match rule must match.
"""
self._strict_registry[component][rule] = zha_ent
return zha_ent
return decorator
def loose_match(
self,
component: str,
channel_names: Callable | set[str] | str = None,
generic_ids: Callable | set[str] | str = None,
manufacturers: Callable | set[str] | str = None,
models: Callable | set[str] | str = None,
aux_channels: Callable | set[str] | str = None,
) -> Callable[[CALLABLE_T], CALLABLE_T]:
"""Decorate a loose match rule."""
rule = MatchRule(
channel_names, generic_ids, manufacturers, models, aux_channels
)
def decorator(zha_entity: CALLABLE_T) -> CALLABLE_T:
"""Register a loose match rule.
All non empty fields of a match rule must match.
"""
self._loose_registry[component][rule] = zha_entity
return zha_entity
return decorator
def group_match(self, component: str) -> Callable[[CALLABLE_T], CALLABLE_T]:
"""Decorate a group match rule."""
def decorator(zha_ent: CALLABLE_T) -> CALLABLE_T:
"""Register a group match rule."""
self._group_registry[component] = zha_ent
return zha_ent
return decorator
ZHA_ENTITIES = ZHAEntityRegistry()
|
the-stack_106_30190 | from odoo import models, fields, api
STATES = {"draft": [("readonly", False)]}
def compute_partition_amount(amount, line_amount, total_amount):
if total_amount > 0:
return round(amount * line_amount / total_amount, 2)
return 0
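# Illustrative example: splitting a 100.00 freight charge over a line worth
# 30.00 of a 120.00 order gives
#   compute_partition_amount(100.0, 30.0, 120.0) -> 25.0
# while a zero (or negative) total short-circuits to 0.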
class PurchaseOrder(models.Model):
_inherit = "purchase.order"
l10n_br_delivery_amount = fields.Monetary(
string="Frete",
compute="_compute_l10n_br_delivery_amount",
inverse="_inverse_l10n_br_delivery_amount",
readonly=True,
states=STATES,
)
l10n_br_expense_amount = fields.Monetary(
string="Despesa",
compute="_compute_l10n_br_expense_amount",
inverse="_inverse_l10n_br_expense_amount",
readonly=True,
states=STATES,
)
l10n_br_insurance_amount = fields.Monetary(
string="Seguro",
compute="_compute_l10n_br_insurance_amount",
inverse="_inverse_l10n_br_insurance_amount",
readonly=True,
states=STATES,
)
def compute_lines_partition(self, line_type):
if line_type not in ("delivery", "expense", "insurance"):
return
total = sum(
line.price_unit * line.product_qty
for line in self.order_line
if not line.is_delivery_expense_or_insurance()
)
filtered_lines = self.order_line.filtered(
lambda x: not x.is_delivery_expense_or_insurance()
)
field_name = "l10n_br_{}_amount".format(line_type)
balance = self[field_name]
for line in filtered_lines:
if line == filtered_lines[-1]:
amount = balance
else:
amount = compute_partition_amount(
self[field_name],
line.price_unit * line.product_qty,
total,
)
line.update({field_name: amount})
balance -= amount
def handle_delivery_expense_insurance_lines(self, line_type):
if line_type not in ("delivery", "expense", "insurance"):
return
boolean_field_name = "l10n_br_is_{}".format(line_type)
amount_field_name = "l10n_br_{}_amount".format(line_type)
line = self.order_line.filtered(lambda x: x[boolean_field_name])
if line and self[amount_field_name] > 0:
line.write(
{
"price_unit": self[amount_field_name],
"product_qty": 1,
}
)
elif line:
line.unlink()
elif self[amount_field_name] > 0:
product_external_id = "l10n_br_account.product_product_{}".format(
line_type
)
product = self.env.ref(product_external_id)
self.write(
{
"order_line": [
(
0,
0,
{
"order_id": self.id,
"product_id": product.id,
"name": product.name_get()[0][1],
"price_unit": self[amount_field_name],
"product_qty": 1,
boolean_field_name: True,
"date_planned": fields.Datetime.now(),
"product_uom": 1,
},
)
]
}
)
self.compute_lines_partition(line_type)
for line in self.order_line.filtered(
lambda x: not x.is_delivery_expense_or_insurance()
):
line._compute_tax_id()
@api.depends(
"order_line",
"order_line.price_unit",
"order_line.product_qty",
)
def _compute_l10n_br_delivery_amount(self):
for item in self:
delivery_line = item.order_line.filtered(
lambda x: x.l10n_br_is_delivery
)
item.l10n_br_delivery_amount = delivery_line.price_total
item.compute_lines_partition("delivery")
def _inverse_l10n_br_delivery_amount(self):
for item in self:
item.handle_delivery_expense_insurance_lines("delivery")
@api.depends(
"order_line",
"order_line.price_unit",
"order_line.product_qty",
)
def _compute_l10n_br_expense_amount(self):
for item in self:
expense_line = item.order_line.filtered(
lambda x: x.l10n_br_is_expense
)
item.l10n_br_expense_amount = expense_line.price_total
item.compute_lines_partition("expense")
def _inverse_l10n_br_expense_amount(self):
for item in self:
item.handle_delivery_expense_insurance_lines("expense")
@api.depends(
"order_line",
"order_line.price_unit",
"order_line.product_qty",
)
def _compute_l10n_br_insurance_amount(self):
for item in self:
insurance_line = item.order_line.filtered(
lambda x: x.l10n_br_is_insurance
)
item.l10n_br_insurance_amount = insurance_line.price_total
item.compute_lines_partition("insurance")
def _inverse_l10n_br_insurance_amount(self):
for item in self:
item.handle_delivery_expense_insurance_lines("insurance")
class PurchaseOrderLine(models.Model):
_inherit = "purchase.order.line"
l10n_br_is_delivery = fields.Boolean(string="É Frete?")
l10n_br_is_expense = fields.Boolean(string="É Despesa?")
l10n_br_is_insurance = fields.Boolean(string="É Seguro?")
l10n_br_delivery_amount = fields.Monetary(string="Frete")
l10n_br_expense_amount = fields.Monetary(string="Despesa")
l10n_br_insurance_amount = fields.Monetary(string="Seguro")
def is_delivery_expense_or_insurance(self):
return (
self.l10n_br_is_delivery
or self.l10n_br_is_expense
or self.l10n_br_is_insurance
)
def _compute_tax_id(self):
super(PurchaseOrderLine, self)._compute_tax_id()
for line in self:
if line.is_delivery_expense_or_insurance():
line.taxes_id = False
continue
fpos = line.order_id.fiscal_position_id
if not fpos:
continue
line.taxes_id = (
line.taxes_id | fpos.apply_tax_ids
)
def _prepare_account_move_line(self, move=False):
res = super(PurchaseOrderLine, self)._prepare_account_move_line(move)
res.update(
{
"l10n_br_is_delivery": self.l10n_br_is_delivery,
"l10n_br_is_expense": self.l10n_br_is_expense,
"l10n_br_is_insurance": self.l10n_br_is_insurance,
"l10n_br_expense_amount": self.l10n_br_expense_amount,
"l10n_br_insurance_amount": self.l10n_br_insurance_amount,
"quantity": self.product_qty,
}
)
return res
|
the-stack_106_30191 | import os
from unittest import TestCase
from checkov.cloudformation.graph_builder.graph_components.block_types import BlockType
from checkov.cloudformation.graph_manager import CloudformationGraphManager
from checkov.cloudformation.parser import parse
from checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
class TestCloudformationGraphManager(TestCase):
def test_build_graph_from_source_directory_no_rendering(self):
root_dir = os.path.realpath(os.path.join(TEST_DIRNAME, "./runner/resources"))
graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
local_graph, definitions = graph_manager.build_graph_from_source_directory(root_dir, render_variables=False)
expected_resources_by_file = {
os.path.join(root_dir, "no_properties.yaml"): [
"AWS::Serverless::Function.NoPropertiesYaml"
],
os.path.join(root_dir, "no_properties.json"): [
"AWS::Serverless::Function.NoPropertiesJson"
],
os.path.join(root_dir, "tags.yaml"): [
"AWS::S3::Bucket.DataBucket",
"AWS::S3::Bucket.NoTags",
"AWS::EKS::Nodegroup.EKSClusterNodegroup",
"AWS::AutoScaling::AutoScalingGroup.TerraformServerAutoScalingGroup",
],
os.path.join(root_dir, "cfn_newline_at_end.yaml"): [
"AWS::RDS::DBInstance.MyDB",
"AWS::S3::Bucket.MyBucket",
],
os.path.join(root_dir, "success.json"): [
"AWS::S3::Bucket.acmeCWSBucket",
"AWS::S3::Bucket.acmeCWSBucket2",
"AWS::S3::BucketPolicy.acmeCWSBucketPolicy",
"AWS::SNS::Topic.acmeCWSTopic",
"AWS::SNS::TopicPolicy.acmeCWSTopicPolicy",
"AWS::CloudTrail::Trail.acmeCWSTrail",
"AWS::KMS::Key.CloudtrailKMSKey",
"AWS::KMS::Alias.CloudtrailKMSKeyAlias",
"AWS::SQS::Queue.acmeCWSQueue",
"AWS::SQS::QueuePolicy.acmeCWSQueuePolicy",
"AWS::SNS::Subscription.acmeCWSSubscription",
"AWS::IAM::Role.acmeCWSSACrossAccountAccessRole",
"AWS::EKS::Cluster.eksCluster",
"Custom::acmeSnsCustomResource.acmeSnsCustomResource",
],
os.path.join(root_dir, "fail.yaml"): [
"AWS::SQS::Queue.UnencryptedQueue",
]
}
self.assertEqual(43, len(local_graph.vertices))
self.assertEqual(23, len(local_graph.vertices_by_block_type[BlockType.RESOURCE]))
self.assertEqual(9, len(local_graph.vertices_by_block_type[BlockType.PARAMETERS]))
self.assertEqual(6, len(local_graph.vertices_by_block_type[BlockType.OUTPUTS]))
self.assertEqual(4, len(local_graph.vertices_by_block_type[BlockType.CONDITIONS]))
self.assertEqual(1, len(local_graph.vertices_by_block_type[BlockType.MAPPINGS]))
for v in local_graph.vertices:
if v.block_type == BlockType.RESOURCE:
self.assertIn(v.name, expected_resources_by_file[v.path])
sqs_queue_vertex = local_graph.vertices[local_graph.vertices_block_name_map[BlockType.RESOURCE]["AWS::SQS::Queue.acmeCWSQueue"][0]]
self.assertDictEqual({'Fn::Join': ['', [{'Ref': 'ResourceNamePrefix', '__startline__': 650, '__endline__': 652}, '-acmecws']], '__startline__': 646, '__endline__': 656}, sqs_queue_vertex.attributes["QueueName"])
def test_build_graph_from_source_directory_with_rendering(self):
root_dir = os.path.realpath(os.path.join(TEST_DIRNAME, "./runner/resources"))
graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
local_graph, definitions = graph_manager.build_graph_from_source_directory(root_dir, render_variables=True)
sqs_queue_vertex = local_graph.vertices[local_graph.vertices_block_name_map[BlockType.RESOURCE]["AWS::SQS::Queue.acmeCWSQueue"][0]]
expected_node = {'Fn::Join': ['', ['acme', '-acmecws']], '__startline__': 646, '__endline__': 656}
self.assertDictEqual(expected_node, sqs_queue_vertex.config["QueueName"])
found = False
for d in definitions:
if 'resources/success.json' in d:
found = True
node = definitions[d]['Resources']['acmeCWSQueue']['Properties']['QueueName']
self.assertDictEqual(expected_node, node)
self.assertTrue(found, 'Did not find the wanted node, for acmeCWSQueue')
def test_build_graph_from_definitions(self):
relative_file_path = "./checks/resource/aws/example_APIGatewayXray/APIGatewayXray-PASSED.yaml"
definitions = {}
file = os.path.realpath(os.path.join(TEST_DIRNAME, relative_file_path))
(definitions[relative_file_path], definitions_raw) = parse(file)
graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
local_graph = graph_manager.build_graph_from_definitions(definitions)
self.assertEqual(1, len(local_graph.vertices))
resource_vertex = local_graph.vertices[0]
self.assertEqual("AWS::ApiGateway::Stage.Enabled", resource_vertex.name)
self.assertEqual("AWS::ApiGateway::Stage.Enabled", resource_vertex.id)
self.assertEqual(BlockType.RESOURCE, resource_vertex.block_type)
self.assertEqual("CloudFormation", resource_vertex.source)
self.assertDictEqual(definitions[relative_file_path]["Resources"]["Enabled"]["Properties"], resource_vertex.attributes)
|
the-stack_106_30193 | import uuid
from office365.sharepoint.fields.field import Field
from office365.sharepoint.fields.field_creation_information import FieldCreationInformation
from office365.sharepoint.fields.field_type import FieldType
from office365.sharepoint.views.view_field_collection import ViewFieldCollection
from tests import create_unique_name
from tests.sharepoint.sharepoint_case import SPTestCase
from office365.sharepoint.changes.change_query import ChangeQuery
from office365.sharepoint.lists.list import List
from office365.sharepoint.lists.list_creation_information import ListCreationInformation
from office365.sharepoint.lists.list_template_type import ListTemplateType
from office365.sharepoint.views.view import View
from office365.sharepoint.views.view_create_information import ViewCreationInformation
class TestSPView(SPTestCase):
target_list = None # type: List
target_view = None # type: View
target_field = None # type: Field
view_fields_count = None
@classmethod
def setUpClass(cls):
super(TestSPView, cls).setUpClass()
cls.target_list = cls.ensure_list(cls.client.web,
ListCreationInformation("Tasks",
None,
ListTemplateType.Tasks)
)
field_info = FieldCreationInformation("TaskComment_" + uuid.uuid4().hex, FieldType.Note)
cls.target_field = cls.target_list.fields.add(field_info).execute_query()
@classmethod
def tearDownClass(cls):
cls.target_list.delete_object().execute_query()
def test1_get_all_views(self):
all_views = self.target_list.views.get().execute_query()
self.assertGreater(len(all_views), 1)
def test2_create_view(self):
view_properties = ViewCreationInformation()
view_properties.Title = create_unique_name("My Tasks")
view_properties.PersonalView = True
view_properties.Query = "<Where><Eq><FieldRef ID='AssignedTo' /><Value " \
"Type='Integer'><UserID/></Value></Eq></Where> "
new_view = self.target_list.views.add(view_properties).execute_query()
self.assertEqual(view_properties.Title, new_view.properties['Title'])
self.__class__.target_view = new_view
def test3_read_view(self):
view_to_read = self.__class__.target_view.get().execute_query()
self.assertIsNotNone(view_to_read.resource_path)
def test4_render_as_html(self):
result = self.__class__.target_view.render_as_html()
self.client.execute_query()
self.assertIsNotNone(result.value)
def test5_get_default_view_items(self):
view_items = self.target_list.default_view.get_items().get().execute_query()
self.assertIsNotNone(view_items.resource_path)
def test6_get_view_items(self):
view_items = self.__class__.target_view.get_items().get().execute_query()
self.assertIsNotNone(view_items.resource_path)
def test7_update_view(self):
title_updated = self.__class__.target_view.properties["Title"] + "_updated"
view_to_update = self.__class__.target_view
view_to_update.set_property('Title', title_updated).update().execute_query()
result = self.target_list.views.filter("Title eq '{0}'".format(title_updated)).get().execute_query()
self.assertEqual(len(result), 1)
def test8_get_view_fields(self):
view = self.__class__.target_view.expand(["ViewFields"]).get().execute_query()
self.assertIsNotNone(view.view_fields)
self.assertIsInstance(view.view_fields, ViewFieldCollection)
self.__class__.view_fields_count = len(view.view_fields)
def test9_add_view_field(self):
field_name = self.__class__.target_field.internal_name
self.__class__.target_view.view_fields.add_view_field(field_name).execute_query()
after_view_fields = self.__class__.target_view.view_fields.get().execute_query()
self.assertEqual(self.__class__.view_fields_count + 1, len(after_view_fields))
def test_10_move_view_field_to(self):
field_name = self.__class__.target_field.internal_name
self.__class__.target_view.view_fields.move_view_field_to(field_name, 2).execute_query()
after_view_fields = self.__class__.target_view.view_fields.get().execute_query()
self.assertEqual(after_view_fields[2], field_name)
def test_11_remove_view_field(self):
field_name = self.__class__.target_field.internal_name
self.__class__.target_view.view_fields.remove_view_field(field_name).execute_query()
after_view_fields = self.__class__.target_view.view_fields.get().execute_query()
self.assertEqual(self.__class__.view_fields_count, len(after_view_fields))
def test_12_remove_all_view_fields(self):
self.__class__.target_view.view_fields.remove_all_view_fields().execute_query()
after_view_fields = self.__class__.target_view.view_fields.get().execute_query()
self.assertEqual(0, len(after_view_fields))
def test_13_get_view_changes(self):
changes = self.client.site.get_changes(ChangeQuery(view=True)).execute_query()
self.assertGreater(len(changes), 0)
def test_14_delete_view(self):
view_to_delete = self.__class__.target_view
view_to_delete.delete_object().execute_query()
|
the-stack_106_30194 | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_location_header_result_request(
subscription_id: str,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
operation_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-06-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/operationResults/{operationId}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"sqlPoolName": _SERIALIZER.url("sql_pool_name", sql_pool_name, 'str'),
"operationId": _SERIALIZER.url("operation_id", operation_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class SqlPoolOperationResultsOperations(object):
"""SqlPoolOperationResultsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_location_header_result(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
operation_id: str,
**kwargs: Any
) -> Any:
"""Get SQL pool operation status.
Get the status of a SQL pool operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param operation_id: Operation ID.
:type operation_id: str
:keyword api_version: Api Version. Default value is "2021-06-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-06-01") # type: str
request = build_get_location_header_result_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_location_header_result.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('object', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_location_header_result.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/operationResults/{operationId}"} # type: ignore
|
the-stack_106_30196 | from django.db.models import Q, Sum, OuterRef, Subquery, F, Value, Case, When, Max
from rest_framework.request import Request
from rest_framework.response import Response
from typing import Any
from usaspending_api.accounts.models.appropriation_account_balances import AppropriationAccountBalances
from usaspending_api.agency.v2.views.agency_base import AgencyBase, PaginationMixin
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.helpers.date_helper import now
from usaspending_api.common.helpers.generic_helper import get_pagination_metadata, sort_with_null_last
from usaspending_api.common.helpers.orm_helpers import ConcatAll
from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass
from usaspending_api.references.models import BureauTitleLookup
from usaspending_api.submissions.models import SubmissionAttributes
class SubcomponentList(PaginationMixin, AgencyBase):
"""
Obtain the count of subcomponents (bureaus) for a specific agency in a single
fiscal year based on GTAS
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/agency/toptier_code/sub_components.md"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params_to_validate = ["toptier_code", "fiscal_year"]
@cache_response()
def get(self, request: Request, *args: Any, **kwargs: Any) -> Response:
self.sortable_columns = ["name", "total_obligations", "total_outlays", "total_budgetary_resources"]
self.default_sort_column = "total_obligations"
results = self.format_results(self.get_file_a_queryset(), self.get_file_b_queryset())
page_metadata = get_pagination_metadata(len(results), self.pagination.limit, self.pagination.page)
return Response(
{
"toptier_code": self.toptier_code,
"fiscal_year": self.fiscal_year,
"results": results[self.pagination.lower_limit : self.pagination.upper_limit],
"messages": self.standard_response_messages,
"page_metadata": page_metadata,
}
)
def format_results(self, file_a_response, file_b_response):
# Combine File A and B Responses
combined_list_dict = {}
for row in file_a_response:
combined_list_dict[row["bureau_info"]] = row
for row in file_b_response:
if row["bureau_info"] not in combined_list_dict:
combined_list_dict[row["bureau_info"]] = row
else:
combined_list_dict[row["bureau_info"]].update(row)
combined_response = [value for key, value in combined_list_dict.items()]
# Format Combined Response
results = sort_with_null_last(
to_sort=[
{
"name": x["bureau_info"].split(";")[0] if x.get("bureau_info") is not None else None,
"id": x["bureau_info"].split(";")[1] if x.get("bureau_info") is not None else None,
"total_obligations": x["total_obligations"] if x["total_obligations"] else None,
"total_outlays": x["total_outlays"] if x["total_outlays"] else None,
"total_budgetary_resources": x["total_budgetary_resources"]
if x["total_budgetary_resources"]
else None,
}
for x in combined_response
],
sort_key=self.pagination.sort_key,
sort_order=self.pagination.sort_order,
)
return results
def get_file_a_queryset(self):
"""
Query Total Budgetary Resources per Bureau from File A for a single Period
"""
filters, bureau_info_subquery = self.get_common_query_objects("treasury_account_identifier")
results = (
(AppropriationAccountBalances.objects.filter(*filters))
.annotate(bureau_info=bureau_info_subquery)
.values("bureau_info")
.annotate(
total_budgetary_resources=Sum("total_budgetary_resources_amount_cpe"),
)
.values("bureau_info", "total_budgetary_resources")
)
return results
def get_file_b_queryset(self):
"""
Query Obligations and Outlays per Bureau from File B for a single Period
"""
filters, bureau_info_subquery = self.get_common_query_objects("treasury_account")
results = (
(FinancialAccountsByProgramActivityObjectClass.objects.filter(*filters))
.annotate(bureau_info=bureau_info_subquery)
.values("bureau_info")
.annotate(
total_obligations=Sum("obligations_incurred_by_program_object_class_cpe"),
total_outlays=Sum("gross_outlay_amount_by_program_object_class_cpe"),
)
.values("bureau_info", "total_obligations", "total_outlays")
)
return results
def get_common_query_objects(self, treasury_account_keyword):
latest = (
SubmissionAttributes.objects.filter(
submission_window__submission_reveal_date__lte=now(), reporting_fiscal_year=self.fiscal_year
)
.values("reporting_fiscal_year")
.annotate(max_fiscal_period=Max(F("reporting_fiscal_period")))
.values("max_fiscal_period")
)
filters = [
Q(**{f"{treasury_account_keyword}__federal_account__parent_toptier_agency": self.toptier_agency}),
Q(submission__reporting_fiscal_year=self.fiscal_year),
Q(submission__reporting_fiscal_period=latest[0]["max_fiscal_period"]),
]
bureau_info_subquery = Subquery(
BureauTitleLookup.objects.filter(
federal_account_code=OuterRef(f"{treasury_account_keyword}__federal_account__federal_account_code")
)
.annotate(
bureau_info=Case(
When(
federal_account_code__startswith="057",
then=ConcatAll(Value("Air Force"), Value(";"), Value("air-force")),
),
When(
federal_account_code__startswith="021",
then=ConcatAll(Value("Army"), Value(";"), Value("army")),
),
When(
federal_account_code__startswith="017",
then=ConcatAll(Value("Navy, Marine Corps"), Value(";"), Value("navy-marine-corps")),
),
When(
federal_account_code__startswith="097",
then=ConcatAll(Value("Defense-wide"), Value(";"), Value("defense-wide")),
),
default=ConcatAll(F("bureau_title"), Value(";"), F("bureau_slug")),
)
)
.values("bureau_info")
)
return filters, bureau_info_subquery
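# Illustrative sketch, not part of the upstream module: the merge that format_results
# performs, keyed on "bureau_info", shown with made-up sample rows. The field names mirror
# the querysets above; the bureau names and dollar amounts are invented.
if __name__ == "__main__":
    file_a_rows = [
        {"bureau_info": "Bureau of Examples;bureau-of-examples", "total_budgetary_resources": 1000.0},
    ]
    file_b_rows = [
        {"bureau_info": "Bureau of Examples;bureau-of-examples", "total_obligations": 400.0, "total_outlays": 250.0},
        {"bureau_info": "Other Bureau;other-bureau", "total_obligations": 75.0, "total_outlays": 60.0},
    ]
    combined = {}
    for row in file_a_rows:
        combined[row["bureau_info"]] = dict(row)
    for row in file_b_rows:
        combined.setdefault(row["bureau_info"], {}).update(row)
    for bureau_info, totals in combined.items():
        # "bureau_info" packs the display name and the slug separated by ";".
        name, slug = bureau_info.split(";")
        # Rows seen in only one file simply lack the other file's totals.
        print(name, slug, totals.get("total_budgetary_resources"), totals.get("total_obligations"))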
|
the-stack_106_30198 | import random
from service import formatter
from . import GeorefLiveTest, asciifold
class SearchStreetsTest(GeorefLiveTest):
"""
    Street search tests.
    See test_addresses.py for the street search tests by address
    (name + house number).
"""
def setUp(self):
self.endpoint = '/api/v1.0/calles'
self.entity = 'calles'
super().setUp()
def test_max_results_returned(self):
"""La cantidad máxima de resultados debe ser configurable."""
lengths = [1, 4, 9, 10, 20]
results_lengths = [
len(self.get_response({'max': length}))
for length in lengths
]
self.assertListEqual(lengths, results_lengths)
def test_id_length(self):
"""El ID de la entidad debe tener la longitud correcta."""
data = self.get_response({'max': 1})[0]
self.assertTrue(len(data['id']) == 13)
def test_name_ordering(self):
"""Los resultados deben poder ser ordenados por nombre."""
resp = self.get_response({
'orden': 'nombre',
'max': 100
})
ordered = [r['nombre'] for r in resp]
expected = sorted(ordered, key=asciifold)
self.assertListEqual(ordered, expected)
def test_id_ordering(self):
"""Los resultados deben poder ser ordenados por ID."""
resp = self.get_response({
'orden': 'id',
'max': 1000
})
ordered = [r['id'] for r in resp]
expected = sorted(ordered)
self.assertListEqual(ordered, expected)
def test_pagination(self):
"""Los resultados deberían poder ser paginados."""
page_size = 50
pages = 5
results = set()
for i in range(pages):
resp = self.get_response({
'inicio': i * page_size,
'max': page_size
})
for result in resp:
results.add(result['id'])
        # If pagination works correctly, no entity IDs should have been
        # repeated across results.
self.assertEqual(len(results), page_size * pages)
def test_total_results(self):
"""Dada una query sin parámetros, se deben retornar los metadatos de
resultados apropiados."""
resp = self.get_response(return_value='full')
self.assertTrue(resp['cantidad'] == 10 and resp['inicio'] == 0)
def test_filter_results_fields(self):
"""Los campos de las direcciones devueltas deben ser filtrables."""
fields_lists = [
['fuente', 'id', 'nombre'],
['fuente', 'id', 'nombre', 'nomenclatura'],
['departamento.nombre', 'fuente', 'id', 'nombre'],
['fuente', 'id', 'altura.inicio.derecha', 'nombre'],
]
fields_lists = [sorted(l) for l in fields_lists]
fields_results = []
for fields in fields_lists:
data = self.get_response({
'campos': ','.join(fields),
'max': 1
})
formatter.flatten_dict(data[0], sep='.')
fields_results.append(sorted(data[0].keys()))
self.assertListEqual(fields_lists, fields_results)
def test_basic_fields_set(self):
"""Se debería poder especificar un conjunto de parámetros
preseleccionados llamado 'basico'."""
self.assert_fields_set_equals('basico', ['id', 'nombre'])
def test_standard_fields_set(self):
"""Se debería poder especificar un conjunto de parámetros
preseleccionados llamado 'estandar'."""
self.assert_fields_set_equals('estandar',
['id', 'nombre',
'altura.fin.derecha',
'altura.fin.izquierda',
'altura.inicio.derecha',
'altura.inicio.izquierda',
'departamento.id',
'departamento.nombre',
'localidad_censal.id',
'localidad_censal.nombre',
'nomenclatura',
'provincia.id', 'provincia.nombre',
'categoria'])
def test_complete_fields_set(self):
"""Se debería poder especificar un conjunto de parámetros
preseleccionados llamado 'completo'."""
self.assert_fields_set_equals('completo',
['id', 'fuente', 'nombre',
'altura.fin.derecha',
'altura.fin.izquierda',
'altura.inicio.derecha',
'altura.inicio.izquierda',
'departamento.id',
'departamento.nombre',
'localidad_censal.id',
'localidad_censal.nombre',
'nomenclatura',
'provincia.id', 'provincia.nombre',
'categoria'])
def test_field_prefixes(self):
"""Se debería poder especificar prefijos de otros campos como campos
a incluir en la respuesta."""
self.assert_fields_set_equals('altura', ['id', 'nombre',
'altura.fin.derecha',
'altura.fin.izquierda',
'altura.inicio.derecha',
'altura.inicio.izquierda'])
def assert_street_search_id_matches(self, term_matches, exact=False):
results = []
for code, query in term_matches:
params = {'nombre': query, 'provincia': code[0][:2]}
if exact:
params['exacto'] = 1
res = self.get_response(params)
results.append(sorted([p['id'] for p in res]))
self.assertListEqual([sorted(ids) for ids, _ in term_matches], results)
def test_name_exact_gibberish_search(self):
"""La búsqueda exacta debe devolver 0 resultados cuando se utiliza un
nombre no existente."""
data = self.get_response({'nombre': 'FoobarFoobar', 'exacto': 1})
self.assertTrue(len(data) == 0)
def test_search_street_type(self):
"""Se debe poder especificar el tipo de calle en la búsqueda."""
validations = []
street_types = [
('AV', 'avenida'),
('RUTA', 'ruta'),
('AUT', 'autopista'),
('CALLE', 'calle'),
('PJE', 'pasaje')
]
for street_type, street_type_long in street_types:
res = self.get_response({
'categoria': street_type_long,
'max': 100
})
validations.append(len(res) > 0)
validations.append(all(
street['categoria'] == street_type for street in res
))
        self.assertTrue(validations and all(validations))
def test_id_search(self):
"""Se debería poder buscar calles por ID."""
identifier = '8208416001280'
data = self.get_response({'id': identifier})[0]
self.assertEqual(identifier, data['id'])
def test_flatten_results(self):
"""Los resultados se deberían poder obtener en formato aplanado."""
data = self.get_response({'max': 1, 'aplanar': True})[0]
self.assertTrue(all([
not isinstance(v, dict) for v in data.values()
]) and data)
def test_bulk_response_len(self):
"""La longitud de la respuesta bulk debería ser igual a la cantidad
de queries envíadas."""
req_len = random.randint(10, 20)
query = {
'nombre': 'SANTA FE'
}
body = {
'calles': [query] * req_len
}
results = self.get_response(method='POST', body=body)
self.assertEqual(len(results), req_len)
def test_bulk_basic(self):
"""La búsqueda de una query sin parámetros debería funcionar
correctamente."""
results = self.get_response(method='POST', body={
'calles': [{}]
})
first = results[0]
self.assertTrue(len(results) == 1 and len(first['calles']) == 10)
def test_bulk_equivalent(self):
"""Los resultados de una query envíada vía bulk deberían ser idénticos a
los resultados de una query individual (GET)."""
queries = [
{
'nombre': 'CORRIENTES'
},
{
'categoria': 'avenida'
},
{
'max': 3
},
{
'id': '8208416001280'
},
{
'campos': 'nombre,categoria'
},
{
'provincia': '02'
},
{
'departamento': '06805'
},
{
'exacto': True,
'nombre': 'LISANDRO DE LA TORRE'
}
]
individual_results = []
for query in queries:
individual_results.append(self.get_response(params=query,
return_value='full'))
bulk_results = self.get_response(method='POST', body={
'calles': queries
})
self.assertEqual(individual_results, bulk_results)
def test_json_format(self):
"""Por default, los resultados de una query deberían estar en
formato JSON."""
default_response = self.get_response()
json_response = self.get_response({'formato': 'json'})
self.assertEqual(default_response, json_response)
def test_csv_format(self):
"""Se debería poder obtener resultados en formato
CSV (sin parámetros)."""
self.assert_valid_csv()
def test_csv_format_query(self):
"""Se debería poder obtener resultados en formato
CSV (con parámetros)."""
self.assert_valid_csv({
'nombre': 'SANTA FE',
'campos': 'nombre,id,categoria'
})
def test_csv_fields(self):
"""Una consulta CSV debería tener ciertos campos, ordenados de una
forma específica."""
resp = self.get_response({'formato': 'csv', 'campos': 'completo'})
headers = next(resp)
self.assertListEqual(headers, ['calle_id',
'calle_nombre',
'calle_altura_inicio_derecha',
'calle_altura_inicio_izquierda',
'calle_altura_fin_derecha',
'calle_altura_fin_izquierda',
'calle_nomenclatura',
'calle_categoria',
'provincia_id',
'provincia_nombre',
'departamento_id',
'departamento_nombre',
'localidad_censal_id',
'localidad_censal_nombre',
'calle_fuente'])
def test_xml_format(self):
"""Se debería poder obtener resultados en formato XML (sin
parámetros)."""
self.assert_valid_xml()
def test_xml_format_query(self):
"""Se debería poder obtener resultados en formato XML (con
parámetros)."""
self.assert_valid_xml({
'max': 100,
'nombre': 'mayo',
'categoria': 'avenida'
})
def test_shp_format(self):
"""Se debería poder obtener resultados en formato SHP (sin
parámetros)."""
self.assert_valid_shp_type(
shape_type=3, # 3 == POLYLINE
params={'max': 1}
)
def test_shp_format_query(self):
"""Se debería poder obtener resultados en formato SHP (con
parámetros)."""
self.assert_valid_shp_query({
'max': 500,
'campos': 'completo',
'categoria': 'avenida'
})
def test_shp_record_fields(self):
"""Los campos obtenidos en formato SHP deberían ser los esperados y
deberían corresponder a los campos obtenidos en otros formatos."""
self.assert_shp_fields('completo', [
'nombre',
'nomencla',
'id',
'prov_id',
'prov_nombre',
'dpto_nombre',
'dpto_id',
'categoria',
'alt_ini_der',
'alt_ini_izq',
'alt_fin_der',
'alt_fin_izq',
'fuente',
'lcen_id',
'lcen_nombre'
])
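# Illustrative sketch, not part of the original test module: a minimal client-side query
# against the streets endpoint exercised above. The base URL is an assumption; point it at
# whichever instance of the API is being tested.
if __name__ == "__main__":
    import requests  # assumed to be available alongside the test dependencies

    BASE_URL = "http://localhost:5000/api/v1.0/calles"  # hypothetical local instance
    response = requests.get(BASE_URL, params={
        'nombre': 'SANTA FE',
        'max': 5,
        'campos': 'id,nombre,categoria',
    })
    response.raise_for_status()
    # The response body mirrors what the bulk tests above read: a 'calles' list of results.
    for street in response.json()['calles']:
        print(street['id'], street['nombre'], street['categoria'])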
|