ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a3627e2c3e2f113328f66720a3d17b78b15e530 | import os
import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle
from source import utils
from source.constants import Constants
class DataPlotBuilder(object):
@staticmethod
def timestamp_to_string(ts):
return time.strftime('%H:%M:%S', time.localtime(ts))
@staticmethod
def convert_labels_for_hypnogram(labels):
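# Map PSG epoch codes onto the hypnogram's y-axis (see the y-ticks set below:
# -4..-1 are N4..N1, 0 is Wake, 1 is REM): code -1 -> 0, code 5 -> 1, and any
# other stage n -> -n.  e.g. [-1, 1, 2, 5] -> [0., -1., -2., 1.]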
processed_labels = np.array([])
for epoch in labels:
if epoch == -1:
processed_labels = np.append(processed_labels, 0)
elif epoch == 5:
processed_labels = np.append(processed_labels, 1)
else:
processed_labels = np.append(processed_labels, -1 * epoch)
return processed_labels
@staticmethod
def tidy_data_plot(x_min, x_max, dt, ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
xticks = np.arange(x_min, x_max, dt)
plt.xticks(xticks)
labels = []
for xt in xticks:
labels.append(DataPlotBuilder.timestamp_to_string(xt))
ax.set_xticklabels(labels)
plt.xlim(x_min, x_max)
@staticmethod
def make_data_demo(subject_id="16", snippet=False):
hr_color = [0.8, 0.2, 0.1]
motion_color = [0.3, 0.2, 0.8]
circ_color = [0.9, 0.7, 0]
psg_color = [0.1, 0.7, 0.1]
font_size = 16
font_name = "Arial"
data_path = str(Constants.CROPPED_FILE_PATH) + '/'
circadian_data_path = str(utils.get_project_root().joinpath('data/circadian_predictions/')) + '/'
output_path = str(Constants.FIGURE_FILE_PATH) + '/'
if snippet is False:
fig = plt.figure(figsize=(10, 12))
else:
fig = plt.figure(figsize=(3, 12))
num_v_plots = 5
fig.patch.set_facecolor('white')
if (os.path.isfile(data_path + subject_id + '_cleaned_hr.out') and os.path.isfile(
data_path + subject_id + '_cleaned_motion.out') and os.path.isfile(
data_path + subject_id + '_cleaned_psg.out') and
os.path.isfile(data_path + subject_id + '_cleaned_counts.out') and
os.stat(data_path + subject_id + '_cleaned_motion.out').st_size > 0) and os.path.isfile(
circadian_data_path + subject_id + '_clock_proxy.txt'):
hr = np.genfromtxt(data_path + subject_id + '_cleaned_hr.out', delimiter=' ')
motion = np.genfromtxt(data_path + subject_id + '_cleaned_motion.out', delimiter=' ')
scores = np.genfromtxt(data_path + subject_id + '_cleaned_psg.out', delimiter=' ')
counts = np.genfromtxt(data_path + subject_id + '_cleaned_counts.out', delimiter=',')
circ_model = np.genfromtxt(circadian_data_path + subject_id + '_clock_proxy.txt', delimiter=',')
min_time = min(scores[:, 0])
max_time = max(scores[:, 0])
dt = 60 * 60
sample_point_fraction = 0.92
sample_point = sample_point_fraction * (max_time - min_time) + min_time
window_size = 10
if snippet:
min_time = sample_point
max_time = sample_point + window_size
ax = plt.subplot(num_v_plots, 1, 1)
ax.plot(motion[:, 0], motion[:, 1], color=motion_color)
ax.plot(motion[:, 0], motion[:, 2], color=[0.4, 0.2, 0.7])
ax.plot(motion[:, 0], motion[:, 3], color=[0.5, 0.2, 0.6])
plt.ylabel('Motion (g)', fontsize=font_size, fontname=font_name)
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
if snippet:
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.yaxis.label.set_visible(False)
inds = np.intersect1d(np.where(motion[:, 0] > sample_point)[0],
np.where(motion[:, 0] <= sample_point + window_size)[0])
y_min = np.amin(motion[inds, 1:3])
plt.ylim(y_min - 0.005, y_min + 0.025)
# Get rid of the ticks
ax.set_xticks([])
ax.yaxis.set_ticks_position("right")
plt.ylabel('')
plt.xlabel(str(window_size) + ' sec window', fontsize=font_size, fontname=font_name)
else:
y_min = -3.2
y_max = 2.5
plt.ylim(y_min, y_max)
current_axis = plt.gca()
current_axis.add_patch(
Rectangle((sample_point, y_min), window_size, y_max - y_min, alpha=0.7, facecolor="gray"))
ax = plt.subplot(num_v_plots, 1, 2)
ax.plot(counts[:, 0], counts[:, 1], color=[0.2, 0.2, 0.7])
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
plt.ylabel('Counts', fontsize=font_size, fontname=font_name)
if snippet:
plt.axis('off')
plt.ylim(-1, -1)
ax = plt.subplot(num_v_plots, 1, 3)
ax.plot(hr[:, 0], hr[:, 1], color=hr_color)
plt.ylabel('Heart rate (bpm)', fontsize=font_size, fontname=font_name)
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
sample_point = sample_point_fraction * (max_time - min_time) + min_time
window_size = 1200
if snippet:
min_time = sample_point
max_time = sample_point + window_size
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.yaxis.label.set_visible(False)
ax.set_xticks([])
ax.yaxis.set_ticks_position("right")
plt.ylabel('')
plt.xlabel(str(window_size) + ' sec window', fontsize=font_size, fontname=font_name)
plt.ylim(35, 100)
else:
y_min = 40
y_max = 130
plt.ylim(y_min, y_max)
current_axis = plt.gca()
current_axis.add_patch(
Rectangle((sample_point, y_min), window_size, y_max - y_min, alpha=0.35, facecolor="gray"))
plt.ylim(40, 130)
ax = plt.subplot(num_v_plots, 1, 4)
ax.plot(circ_model[:, 0], -circ_model[:, 1], color=circ_color)
plt.ylabel('Clock Proxy', fontsize=font_size, fontname=font_name)
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
if snippet:
plt.axis('off')
plt.ylim(-1, -1)
else:
plt.ylim(.2, 1.2)
ax = plt.subplot(num_v_plots, 1, 5)
relabeled_scores = DataPlotBuilder.convert_labels_for_hypnogram(scores[:, 1])
ax.step(scores[:, 0], relabeled_scores, color=psg_color)
plt.ylabel('Stage', fontsize=font_size, fontname=font_name)
plt.xlabel('Time', fontsize=font_size, fontname=font_name)
DataPlotBuilder.tidy_data_plot(min_time, max_time, dt, ax)
ax.set_yticks([-4, -3, -2, -1, 0, 1])
ax.set_yticklabels(['N4', 'N3', 'N2', 'N1', 'Wake', 'REM'])
if snippet:
plt.axis('off')
plt.ylim(5, 5)
else:
plt.ylim(-5, 2)
if not snippet:
plt.savefig(output_path + 'data_validation_' + subject_id + '.png', bbox_inches='tight', pad_inches=0.1,
dpi=300)
else:
plt.savefig(output_path + 'data_validation_zoom_' + subject_id + '.png', bbox_inches='tight',
pad_inches=0.1, dpi=300)
plt.close()
|
py | 1a3628458c483d326ff5fa11d60d5d1cb7b19a7f | """Utility functions with no non-trivial dependencies."""
import os
import pathlib
import re
import subprocess
import sys
import hashlib
import io
import shutil
import time
from typing import (
TypeVar, List, Tuple, Optional, Dict, Sequence, Iterable, Container, IO, Callable
)
from typing_extensions import Final, Type, Literal
try:
import curses
import _curses # noqa
CURSES_ENABLED = True
except ImportError:
CURSES_ENABLED = False
T = TypeVar('T')
ENCODING_RE: Final = re.compile(br"([ \t\v]*#.*(\r\n?|\n))??[ \t\v]*#.*coding[:=][ \t]*([-\w.]+)")
DEFAULT_SOURCE_OFFSET: Final = 4
DEFAULT_COLUMNS: Final = 80
# At least this number of columns will be shown on each side of
# error location when printing source code snippet.
MINIMUM_WIDTH: Final = 20
# VT100 color code processing was added in Windows 10, but only the second major update,
# Threshold 2. Fortunately, everyone (even on LTSB, Long Term Support Branch) should
# have a version of Windows 10 newer than this. Note that Windows 8 and below are not
# supported, but are either going out of support, or make up only a few % of the market.
MINIMUM_WINDOWS_MAJOR_VT100: Final = 10
MINIMUM_WINDOWS_BUILD_VT100: Final = 10586
default_python2_interpreter: Final = [
"python2",
"python",
"/usr/bin/python",
"C:\\Python27\\python.exe",
]
SPECIAL_DUNDERS: Final = frozenset((
"__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__",
))
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
Args:
exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
def is_sunder(name: str) -> bool:
return not is_dunder(name) and name.startswith('_') and name.endswith('_')
def split_module_names(mod_name: str) -> List[str]:
"""Return the module and all parent module names.
So, if `mod_name` is 'a.b.c', this function will return
['a.b.c', 'a.b', 'a'].
"""
out = [mod_name]
while '.' in mod_name:
mod_name = mod_name.rsplit('.', 1)[0]
out.append(mod_name)
return out
def module_prefix(modules: Iterable[str], target: str) -> Optional[str]:
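# Return the longest prefix of 'target' that appears in 'modules', or None if no
# prefix matches.  For example, module_prefix({'a.b'}, 'a.b.c.d') == 'a.b'.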
result = split_target(modules, target)
if result is None:
return None
return result[0]
def split_target(modules: Iterable[str], target: str) -> Optional[Tuple[str, str]]:
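# Walk up the dotted name until a prefix present in 'modules' is found and return
# (module, remainder), e.g. split_target({'a.b'}, 'a.b.c.d') == ('a.b', 'c.d');
# return None if no prefix of 'target' is a known module.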
remaining: List[str] = []
while True:
if target in modules:
return target, '.'.join(remaining)
components = target.rsplit('.', 1)
if len(components) == 1:
return None
target = components[0]
remaining.insert(0, components[1])
def short_type(obj: object) -> str:
"""Return the last component of the type name of an object.
If obj is None, return 'nil'. For example, if obj is 1, return 'int'.
"""
if obj is None:
return 'nil'
t = str(type(obj))
return t.split('.')[-1].rstrip("'>")
def find_python_encoding(text: bytes, pyversion: Tuple[int, int]) -> Tuple[str, int]:
"""PEP-263 for detecting Python file encoding"""
result = ENCODING_RE.match(text)
if result:
line = 2 if result.group(1) else 1
encoding = result.group(3).decode('ascii')
# Handle some aliases that Python is happy to accept and that are used in the wild.
if encoding.startswith(('iso-latin-1-', 'latin-1-')) or encoding == 'iso-latin-1':
encoding = 'latin-1'
return encoding, line
else:
default_encoding = 'utf8' if pyversion[0] >= 3 else 'ascii'
return default_encoding, -1
def bytes_to_human_readable_repr(b: bytes) -> str:
"""Converts bytes into some human-readable representation. Unprintable
bytes such as the nul byte are escaped. For example:
>>> b = bytes([102, 111, 111, 10, 0])
>>> s = bytes_to_human_readable_repr(b)
>>> print(s)
foo\n\x00
>>> print(repr(s))
'foo\\n\\x00'
"""
return repr(b)[2:-1]
class DecodeError(Exception):
"""Exception raised when a file cannot be decoded due to an unknown encoding type.
Essentially a wrapper for the LookupError raised by `bytearray.decode`
"""
def decode_python_encoding(source: bytes, pyversion: Tuple[int, int]) -> str:
"""Read the Python file with while obeying PEP-263 encoding detection.
Returns the source as a string.
"""
# check for BOM UTF-8 encoding and strip it out if present
if source.startswith(b'\xef\xbb\xbf'):
encoding = 'utf8'
source = source[3:]
else:
# look at first two lines and check if PEP-263 coding is present
encoding, _ = find_python_encoding(source, pyversion)
try:
source_text = source.decode(encoding)
except LookupError as lookuperr:
raise DecodeError(str(lookuperr)) from lookuperr
return source_text
def read_py_file(path: str, read: Callable[[str], bytes],
pyversion: Tuple[int, int]) -> Optional[List[str]]:
"""Try reading a Python file as list of source lines.
Return None if something goes wrong.
"""
try:
source = read(path)
except OSError:
return None
else:
try:
source_lines = decode_python_encoding(source, pyversion).splitlines()
except DecodeError:
return None
return source_lines
def trim_source_line(line: str, max_len: int, col: int, min_width: int) -> Tuple[str, int]:
"""Trim a line of source code to fit into max_len.
Show 'min_width' characters on each side of 'col' (an error location). If either
start or end is trimmed, this is indicated by adding '...' there.
A typical result looks like this:
...some_variable = function_to_call(one_arg, other_arg) or...
Return the trimmed string and the column offset to adjust error location.
"""
if max_len < 2 * min_width + 1:
# In case the window is too tiny it is better to still show something.
max_len = 2 * min_width + 1
# Trivial case: line already fits in.
if len(line) <= max_len:
return line, 0
# If column is not too large so that there is still min_width after it,
# the line doesn't need to be trimmed at the start.
if col + min_width < max_len:
return line[:max_len] + '...', 0
# Otherwise, if the column is not too close to the end, trim both sides.
if col < len(line) - min_width - 1:
offset = col - max_len + min_width + 1
return '...' + line[offset:col + min_width + 1] + '...', offset - 3
# Finally, if the column is near the end, just trim the start.
return '...' + line[-max_len:], len(line) - max_len - 3
def get_mypy_comments(source: str) -> List[Tuple[int, str]]:
PREFIX = '# mypy: '
# Don't bother splitting up the lines unless we know it is useful
if PREFIX not in source:
return []
lines = source.split('\n')
results = []
for i, line in enumerate(lines):
if line.startswith(PREFIX):
results.append((i + 1, line[len(PREFIX):]))
return results
_python2_interpreter: Optional[str] = None
def try_find_python2_interpreter() -> Optional[str]:
global _python2_interpreter
if _python2_interpreter:
return _python2_interpreter
for interpreter in default_python2_interpreter:
try:
retcode = subprocess.Popen([
interpreter, '-c',
'import sys, typing; assert sys.version_info[:2] == (2, 7)'
]).wait()
if not retcode:
_python2_interpreter = interpreter
return interpreter
except OSError:
pass
return None
PASS_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="0" failures="0" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
</testcase>
</testsuite>
"""
FAIL_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="0" failures="1" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
<failure message="mypy produced messages">{text}</failure>
</testcase>
</testsuite>
"""
ERROR_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="1" failures="0" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
<error message="mypy produced errors">{text}</error>
</testcase>
</testsuite>
"""
def write_junit_xml(dt: float, serious: bool, messages: List[str], path: str,
version: str, platform: str) -> None:
from xml.sax.saxutils import escape
if not messages and not serious:
xml = PASS_TEMPLATE.format(time=dt, ver=version, platform=platform)
elif not serious:
xml = FAIL_TEMPLATE.format(text=escape('\n'.join(messages)), time=dt,
ver=version, platform=platform)
else:
xml = ERROR_TEMPLATE.format(text=escape('\n'.join(messages)), time=dt,
ver=version, platform=platform)
# checks for a directory structure in path and creates folders if needed
xml_dirs = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(xml_dirs):
os.makedirs(xml_dirs)
with open(path, 'wb') as f:
f.write(xml.encode('utf-8'))
class IdMapper:
"""Generate integer ids for objects.
Unlike id(), these start from 0 and increment by 1, and ids won't
get reused across the life-time of IdMapper.
Assume objects don't redefine __eq__ or __hash__.
"""
def __init__(self) -> None:
self.id_map: Dict[object, int] = {}
self.next_id = 0
def id(self, o: object) -> int:
if o not in self.id_map:
self.id_map[o] = self.next_id
self.next_id += 1
return self.id_map[o]
def get_prefix(fullname: str) -> str:
"""Drop the final component of a qualified name (e.g. ('x.y' -> 'x')."""
return fullname.rsplit('.', 1)[0]
def get_top_two_prefixes(fullname: str) -> Tuple[str, str]:
"""Return one and two component prefixes of a fully qualified name.
Given 'a.b.c.d', return ('a', 'a.b').
If fullname has only one component, return (fullname, fullname).
"""
components = fullname.split('.', 3)
return components[0], '.'.join(components[:2])
def correct_relative_import(cur_mod_id: str,
relative: int,
target: str,
is_cur_package_init_file: bool) -> Tuple[str, bool]:
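# Resolve a relative import against the current module id.  For example, inside
# module 'a.b.c' (not a package __init__), relative=1 with target='d' (i.e.
# 'from . import d') resolves to ('a.b.d', True); the returned bool reports
# whether enough parent packages existed to satisfy the requested level.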
if relative == 0:
return target, True
parts = cur_mod_id.split(".")
rel = relative
if is_cur_package_init_file:
rel -= 1
ok = len(parts) >= rel
if rel != 0:
cur_mod_id = ".".join(parts[:-rel])
return cur_mod_id + (("." + target) if target else ""), ok
fields_cache: Final[Dict[Type[object], List[str]]] = {}
def get_class_descriptors(cls: 'Type[object]') -> Sequence[str]:
import inspect # Lazy import for minor startup speed win
# Maintain a cache of type -> attributes defined by descriptors in the class
# (that is, attributes from __slots__ and C extension classes)
if cls not in fields_cache:
members = inspect.getmembers(
cls,
lambda o: inspect.isgetsetdescriptor(o) or inspect.ismemberdescriptor(o))
fields_cache[cls] = [x for x, y in members if x != '__weakref__' and x != '__dict__']
return fields_cache[cls]
def replace_object_state(new: object, old: object, copy_dict: bool = False) -> None:
"""Copy state of old node to the new node.
This handles cases where there is __dict__ and/or attribute descriptors
(either from slots or because the type is defined in a C extension module).
Assume that both objects have the same __class__.
"""
if hasattr(old, '__dict__'):
if copy_dict:
new.__dict__ = dict(old.__dict__)
else:
new.__dict__ = old.__dict__
for attr in get_class_descriptors(old.__class__):
try:
if hasattr(old, attr):
setattr(new, attr, getattr(old, attr))
elif hasattr(new, attr):
delattr(new, attr)
# There is no way to distinguish getsetdescriptors that allow
# writes from ones that don't (I think?), so we just ignore
# AttributeErrors if we need to.
# TODO: What about getsetdescriptors that act like properties???
except AttributeError:
pass
def is_sub_path(path1: str, path2: str) -> bool:
"""Given two paths, return if path1 is a sub-path of path2."""
return pathlib.Path(path2) in pathlib.Path(path1).parents
def hard_exit(status: int = 0) -> None:
"""Kill the current process without fully cleaning up.
This can be quite a bit faster than a normal exit() since objects are not freed.
"""
sys.stdout.flush()
sys.stderr.flush()
os._exit(status)
def unmangle(name: str) -> str:
"""Remove internal suffixes from a short name."""
return name.rstrip("'")
def get_unique_redefinition_name(name: str, existing: Container[str]) -> str:
"""Get a simple redefinition name not present among existing.
For example, for name 'foo' we try 'foo-redefinition', 'foo-redefinition2',
'foo-redefinition3', etc. until we find one that is not in existing.
"""
r_name = name + '-redefinition'
if r_name not in existing:
return r_name
i = 2
while r_name + str(i) in existing:
i += 1
return r_name + str(i)
def check_python_version(program: str) -> None:
"""Report issues with the Python used to run mypy, dmypy, or stubgen"""
# Check for known bad Python versions.
if sys.version_info[:2] < (3, 6):
sys.exit("Running {name} with Python 3.5 or lower is not supported; "
"please upgrade to 3.6 or newer".format(name=program))
def count_stats(messages: List[str]) -> Tuple[int, int, int]:
"""Count total number of errors, notes and error_files in message list."""
errors = [e for e in messages if ': error:' in e]
error_files = {e.split(':')[0] for e in errors}
notes = [e for e in messages if ': note:' in e]
return len(errors), len(notes), len(error_files)
def split_words(msg: str) -> List[str]:
"""Split line of text into words (but not within quoted groups)."""
next_word = ''
res: List[str] = []
allow_break = True
for c in msg:
if c == ' ' and allow_break:
res.append(next_word)
next_word = ''
continue
if c == '"':
allow_break = not allow_break
next_word += c
res.append(next_word)
return res
def get_terminal_width() -> int:
"""Get current terminal width if possible, otherwise return the default one."""
return (int(os.getenv('MYPY_FORCE_TERMINAL_WIDTH', '0'))
or shutil.get_terminal_size().columns
or DEFAULT_COLUMNS)
def soft_wrap(msg: str, max_len: int, first_offset: int,
num_indent: int = 0) -> str:
"""Wrap a long error message into few lines.
Breaks will only happen between words, and never inside a quoted group
(to avoid breaking types such as "Union[int, str]"). The 'first_offset' is
the width before the start of first line.
Pad every next line with 'num_indent' spaces. Every line will be at most 'max_len'
characters, except if it is a single word or quoted group.
For example:
first_offset
------------------------
path/to/file: error: 58: Some very long error message
that needs to be split in separate lines.
"Long[Type, Names]" are never split.
^^^^--------------------------------------------------
num_indent max_len
"""
words = split_words(msg)
next_line = words.pop(0)
lines: List[str] = []
while words:
next_word = words.pop(0)
max_line_len = max_len - num_indent if lines else max_len - first_offset
# Add 1 to account for space between words.
if len(next_line) + len(next_word) + 1 <= max_line_len:
next_line += ' ' + next_word
else:
lines.append(next_line)
next_line = next_word
lines.append(next_line)
padding = '\n' + ' ' * num_indent
return padding.join(lines)
def hash_digest(data: bytes) -> str:
"""Compute a hash digest of some data.
We use a cryptographic hash because we want a low probability of
accidental collision, but we don't really care about any of the
cryptographic properties.
"""
# Once we drop Python 3.5 support, we should consider using
# blake2b, which is faster.
return hashlib.sha256(data).hexdigest()
def parse_gray_color(cup: bytes) -> str:
"""Reproduce a gray color in ANSI escape sequence"""
if sys.platform == "win32":
assert False, "curses is not available on Windows"
set_color = ''.join([cup[:-1].decode(), 'm'])
gray = curses.tparm(set_color.encode('utf-8'), 1, 89).decode()
return gray
class FancyFormatter:
"""Apply color and bold font to terminal output.
This currently only works on Linux and Mac.
"""
def __init__(self, f_out: IO[str], f_err: IO[str], show_error_codes: bool) -> None:
self.show_error_codes = show_error_codes
# Check if we are in a human-facing terminal on a supported platform.
if sys.platform not in ('linux', 'darwin', 'win32'):
self.dummy_term = True
return
force_color = int(os.getenv('MYPY_FORCE_COLOR', '0'))
if not force_color and (not f_out.isatty() or not f_err.isatty()):
self.dummy_term = True
return
if sys.platform == 'win32':
self.dummy_term = not self.initialize_win_colors()
else:
self.dummy_term = not self.initialize_unix_colors()
if not self.dummy_term:
self.colors = {'red': self.RED, 'green': self.GREEN,
'blue': self.BLUE, 'yellow': self.YELLOW,
'none': ''}
def initialize_win_colors(self) -> bool:
"""Return True if initialization was successful and we can use colors, False otherwise"""
# Windows ANSI escape sequences are only supported on Threshold 2 and above.
# we check with an assert at runtime and an if check for mypy, as asserts do not
# yet narrow platform
assert sys.platform == 'win32'
if sys.platform == 'win32':
winver = sys.getwindowsversion()
if (winver.major < MINIMUM_WINDOWS_MAJOR_VT100
or winver.build < MINIMUM_WINDOWS_BUILD_VT100):
return False
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_PROCESSED_OUTPUT = 0x1
ENABLE_WRAP_AT_EOL_OUTPUT = 0x2
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
STD_OUTPUT_HANDLE = -11
kernel32.SetConsoleMode(kernel32.GetStdHandle(STD_OUTPUT_HANDLE),
ENABLE_PROCESSED_OUTPUT
| ENABLE_WRAP_AT_EOL_OUTPUT
| ENABLE_VIRTUAL_TERMINAL_PROCESSING)
self.BOLD = '\033[1m'
self.UNDER = '\033[4m'
self.BLUE = '\033[94m'
self.GREEN = '\033[92m'
self.RED = '\033[91m'
self.YELLOW = '\033[93m'
self.NORMAL = '\033[0m'
self.DIM = '\033[2m'
return True
return False
def initialize_unix_colors(self) -> bool:
"""Return True if initialization was successful and we can use colors, False otherwise"""
if sys.platform == "win32" or not CURSES_ENABLED:
return False
try:
# setupterm wants a fd to potentially write an "initialization sequence".
# We override sys.stdout for the daemon API so if stdout doesn't have an fd,
# just give it /dev/null.
try:
fd = sys.stdout.fileno()
except io.UnsupportedOperation:
with open("/dev/null", "rb") as f:
curses.setupterm(fd=f.fileno())
else:
curses.setupterm(fd=fd)
except curses.error:
# Most likely terminfo not found.
return False
bold = curses.tigetstr('bold')
under = curses.tigetstr('smul')
set_color = curses.tigetstr('setaf')
set_eseq = curses.tigetstr('cup')
normal = curses.tigetstr('sgr0')
if not (bold and under and set_color and set_eseq and normal):
return False
self.NORMAL = normal.decode()
self.BOLD = bold.decode()
self.UNDER = under.decode()
self.DIM = parse_gray_color(set_eseq)
self.BLUE = curses.tparm(set_color, curses.COLOR_BLUE).decode()
self.GREEN = curses.tparm(set_color, curses.COLOR_GREEN).decode()
self.RED = curses.tparm(set_color, curses.COLOR_RED).decode()
self.YELLOW = curses.tparm(set_color, curses.COLOR_YELLOW).decode()
return True
def style(self, text: str, color: Literal['red', 'green', 'blue', 'yellow', 'none'],
bold: bool = False, underline: bool = False, dim: bool = False) -> str:
"""Apply simple color and style (underlined or bold)."""
if self.dummy_term:
return text
if bold:
start = self.BOLD
else:
start = ''
if underline:
start += self.UNDER
if dim:
start += self.DIM
return start + self.colors[color] + text + self.NORMAL
def fit_in_terminal(self, messages: List[str],
fixed_terminal_width: Optional[int] = None) -> List[str]:
"""Improve readability by wrapping error messages and trimming source code."""
width = fixed_terminal_width or get_terminal_width()
new_messages = messages.copy()
for i, error in enumerate(messages):
if ': error:' in error:
loc, msg = error.split('error:', maxsplit=1)
msg = soft_wrap(msg, width, first_offset=len(loc) + len('error: '))
new_messages[i] = loc + 'error:' + msg
if error.startswith(' ' * DEFAULT_SOURCE_OFFSET) and '^' not in error:
# TODO: detecting source code highlights through an indent can be surprising.
# Restore original error message and error location.
error = error[DEFAULT_SOURCE_OFFSET:]
column = messages[i+1].index('^') - DEFAULT_SOURCE_OFFSET
# Let source have some space also on the right side, plus 6
# to accommodate ... on each side.
max_len = width - DEFAULT_SOURCE_OFFSET - 6
source_line, offset = trim_source_line(error, max_len, column, MINIMUM_WIDTH)
new_messages[i] = ' ' * DEFAULT_SOURCE_OFFSET + source_line
# Also adjust the error marker position.
new_messages[i+1] = ' ' * (DEFAULT_SOURCE_OFFSET + column - offset) + '^'
return new_messages
def colorize(self, error: str) -> str:
"""Colorize an output line by highlighting the status and error code."""
if ': error:' in error:
loc, msg = error.split('error:', maxsplit=1)
if not self.show_error_codes:
return (loc + self.style('error:', 'red', bold=True) +
self.highlight_quote_groups(msg))
codepos = msg.rfind('[')
if codepos != -1:
code = msg[codepos:]
msg = msg[:codepos]
else:
code = "" # no error code specified
return (loc + self.style('error:', 'red', bold=True) +
self.highlight_quote_groups(msg) + self.style(code, 'yellow'))
elif ': note:' in error:
loc, msg = error.split('note:', maxsplit=1)
formatted = self.highlight_quote_groups(self.underline_link(msg))
return loc + self.style('note:', 'blue') + formatted
elif error.startswith(' ' * DEFAULT_SOURCE_OFFSET):
# TODO: detecting source code highlights through an indent can be surprising.
if '^' not in error:
return self.style(error, 'none', dim=True)
return self.style(error, 'red')
else:
return error
def highlight_quote_groups(self, msg: str) -> str:
"""Make groups quoted with double quotes bold (including quotes).
This is used to highlight types, attribute names etc.
"""
if msg.count('"') % 2:
# Broken error message, don't do any formatting.
return msg
parts = msg.split('"')
out = ''
for i, part in enumerate(parts):
if i % 2 == 0:
out += self.style(part, 'none')
else:
out += self.style('"' + part + '"', 'none', bold=True)
return out
def underline_link(self, note: str) -> str:
"""Underline a link in a note message (if any).
This assumes there is at most one link in the message.
"""
match = re.search(r'https?://\S*', note)
if not match:
return note
start = match.start()
end = match.end()
return (note[:start] +
self.style(note[start:end], 'none', underline=True) +
note[end:])
def format_success(self, n_sources: int, use_color: bool = True) -> str:
"""Format short summary in case of success.
n_sources is total number of files passed directly on command line,
i.e. excluding stubs and followed imports.
"""
msg = 'Success: no issues found in {}' \
' source file{}'.format(n_sources, 's' if n_sources != 1 else '')
if not use_color:
return msg
return self.style(msg, 'green', bold=True)
def format_error(
self, n_errors: int, n_files: int, n_sources: int, *,
blockers: bool = False, use_color: bool = True
) -> str:
"""Format a short summary in case of errors."""
msg = 'Found {} error{} in {} file{}'.format(
n_errors, 's' if n_errors != 1 else '',
n_files, 's' if n_files != 1 else ''
)
if blockers:
msg += ' (errors prevented further checking)'
else:
msg += ' (checked {} source file{})'.format(n_sources, 's' if n_sources != 1 else '')
if not use_color:
return msg
return self.style(msg, 'red', bold=True)
def is_typeshed_file(file: str) -> bool:
# gross, but no other clear way to tell
return 'typeshed' in os.path.abspath(file).split(os.sep)
def is_stub_package_file(file: str) -> bool:
# Use hacky heuristics to check whether file is part of a PEP 561 stub package.
if not file.endswith('.pyi'):
return False
return any(component.endswith('-stubs')
for component in os.path.abspath(file).split(os.sep))
def unnamed_function(name: Optional[str]) -> bool:
return name is not None and name == "_"
# TODO: replace with uses of perf_counter_ns when support for py3.6 is dropped
# (or when mypy properly handles alternate definitions based on python version check)
time_ref = time.perf_counter
def time_spent_us(t0: float) -> int:
return int((time.perf_counter() - t0) * 1e6)
|
py | 1a362891d3032509864794d18de7ac935eb62950 | """ Regression Template
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def main():
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting the Regression Model to the dataset
# Create your regressor here
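# Illustrative placeholder only (an assumption, not part of the original template):
# a decision tree regressor is one simple choice that needs no feature scaling.
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, y)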
# Predicting a new result
y_pred = regressor.predict([[6.5]])  # scikit-learn expects a 2-D array of shape (n_samples, n_features)
# Visualising the Regression results
plt.scatter(X, y, color='red')
plt.plot(X, regressor.predict(X), color='blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
if __name__ == '__main__':
main()
|
py | 1a362ad32ceb7ed4ae239e2f280f1fd640218d79 | # -*- coding: utf-8 -*-
from locust import task,TaskSet
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import config
from common.util import is_ok
from behavior.client import Client
@Client.action
class tt(TaskSet):
def on_start(self):
# Override
pass
@task(100)
def test_json(self):
r = self._get(config.Api.test)
# @task()
# def stop(self):
# self._stop()
# # if self.in_mix:
# # self.interrupt()
# # print('info')
#
# def _stop(self):
# print('stop')
# Client.task_set = tt
|
py | 1a362aed6fd676c6579f74e2249ff12d7b8abd4c | # MIT License
#
# Copyright (c) 2019-2021 Ecole Polytechnique Federale Lausanne (EPFL)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# just an entry point for the setuptools script
from .lancet import main as lancet_main
# Need this entrypoint for lancet
def main():
lancet_main()
if __name__ == "__main__":
main() |
py | 1a362b0a0b723aac5da8754acbdf57280469dc18 | def graph_to_tree(N, edges, root):
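# Build a rooted tree from an undirected adjacency mapping by an iterative
# depth-first traversal from 'root', returning (children, parents).
# Example: with N=3, edges={0: [1, 2], 1: [0], 2: [0]} and root=0,
# children == {0: [1, 2]} and parents == [0, 0, 0].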
from collections import defaultdict
children = defaultdict(list)
parents = [None] * N
parents[root] = root  # the root is its own parent, marking it as visited
stack = [root]
while stack:
v = stack.pop()
for u in edges[v]:
if parents[u] is not None:
# already visited
continue
parents[u] = v
children[v].append(u)
stack.append(u)
return children, parents
|
py | 1a362bf6be38da9261e6ae93a01b26e6377ff961 |
from dask import dataframe as dd
import datetime
def time_exe():
now = datetime.datetime.now()
print (now.strftime("%Y-%m-%d %H:%M:%S"))
def chunk_filtering_yearwise_data(data_):
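# Keep only rows whose year column (index 5) falls in 1995-2005 inclusive.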
return data_[(data_[5]>1994) & (data_[5] <2006)]
chunksize = 64000000 * 1  # 64000000 bytes is roughly 64 MB; raise the multiplier for larger blocks (e.g. ~512 MB)
original_files_dir = "E:/download/eng-all-5gram-2012-extracted/7/"
dataset_path = "E:/download/proj/n5grm/small_ds/yearwise/"
import os
start = 1995
end = 2005
step = 1
folderpath = dataset_path
def split_data_year_wise(startyear,stopyear,yearsetp,basepath,fname,dataset):
print("start time")
time_exe()
stopyear= stopyear+1
for i in range(startyear, stopyear, yearsetp):
year_dd = dataset[dataset[5]==i]
path = os.path.join(basepath,str(i),fname)
if not os.path.exists(path):
os.makedirs(path)
print("processing year "+str(i))
year_dd.to_parquet(path,engine='pyarrow')
#year_dd.to_csv(path)
print("finisheed time")
time_exe()
def process_start():
for filename in os.listdir(original_files_dir):
print("file processing started "+ filename)
from dask import dataframe as dd
df = dd.read_csv(os.path.join(original_files_dir,filename),
sep='\s+',
header=None, blocksize=chunksize,error_bad_lines=False,
encoding='utf-8',engine='python')
split_data_year_wise(start,end,step,folderpath,filename,df)
def main():
print("starting......")
process_start()
if __name__ == "__main__":
main()
|
py | 1a362c3a712dfeeb0ad39e894668c647525ef925 | import os
from unittest import TestCase
from gwx_core.utils import extractor
from tests.stubs import file_stub
class ExtractorTest(TestCase):
def test_get_attribute_raise_type_error(self) -> None:
"""Assert that get_attribute method raises TypeError when the file type is not .py.
:return: None
"""
with self.assertRaises(TypeError):
extractor.get_attribute('/path/to/file.txt', 'file')
def test_get_attribute_raises_os_error(self) -> None:
"""Assert that get_attribute method raises OSError when the file is non existing.
:return:
"""
with self.assertRaises(OSError):
extractor.get_attribute('/path/to/file_stub.py', 'attribute')
def test_get_attributes_successfully(self) -> None:
"""Assert that the value that is extracted from the path to file,
is equal to the attribute of the actual file.
:return: None
"""
file = f'{os.path.dirname(os.path.abspath(__file__))}/../stubs/file_stub.py'
self.assertEqual(file_stub.attribute_name, extractor.get_attribute(file, 'attribute_name'))
|
py | 1a362c697d703f58f7d39315971ab15160040a89 | from pathlib import Path
import sys
from selenium.common.exceptions import TimeoutException
import re
import subprocess
import json
from typing import List, Dict
# pycharm complains that build_assets is an unresolved ref
# don't worry about it, the script still runs
from build_assets.selenium_runner.BuildSeleniumRunner import BuildSeleniumRunner
from build_assets import filehandler, arg_getters, util, api_handler
def main():
"""
Build the icons using Icomoon. Also optimize the svgs.
"""
runner = None
try:
args = arg_getters.get_selenium_runner_args()
new_icons = get_icons_for_building(args.icomoon_json_path, args.devicon_json_path, args.token)
if len(new_icons) == 0:
sys.exit("No files need to be uploaded. Ending script...")
print(f"There are {len(new_icons)} icons to be build. Here are they:", *new_icons, sep = "\n")
print("Begin optimizing files...")
optimize_svgs(new_icons, args.icons_folder_path)
print("Updating the icomoon json...")
update_icomoon_json(new_icons, args.icomoon_json_path)
print("Start the building icons process...")
icon_svgs = filehandler.get_svgs_paths(
new_icons, args.icons_folder_path, icon_versions_only=True)
zip_name = "devicon-v1.0.zip"
zip_path = Path(args.download_path, zip_name)
screenshot_folder = filehandler.create_screenshot_folder("./")
runner = BuildSeleniumRunner(args.download_path,
args.geckodriver_path, args.headless)
runner.build_icons(args.icomoon_json_path, zip_path,
icon_svgs, screenshot_folder)
filehandler.extract_files(str(zip_path), args.download_path)
filehandler.rename_extracted_files(args.download_path)
print("Creating the release message by querying the GitHub API...")
get_release_message(args.token)
print("Task completed.")
except TimeoutException as e:
util.exit_with_err("Selenium Time Out Error: \n" + str(e))
except Exception as e:
util.exit_with_err(e)
finally:
if runner is not None:
runner.close()
def get_icons_for_building(icomoon_json_path: str, devicon_json_path: str, token: str):
"""
Get the icons for building.
:param icomoon_json_path - the path to the `icomoon.json`.
:param devicon_json_path - the path to the `devicon.json`.
:param token - the token to access the GitHub API.
:return a list of dict containing info on the icons. These are
from the `devicon.json`.
"""
devicon_json = filehandler.get_json_file_content(devicon_json_path)
pull_reqs = api_handler.get_merged_pull_reqs_since_last_release(token)
new_icons = []
for pull_req in pull_reqs:
if api_handler.is_feature_icon(pull_req):
filtered_icon = util.find_object_added_in_pr(devicon_json, pull_req["title"])
if filtered_icon not in new_icons:
new_icons.append(filtered_icon)
# get any icons that might not have been found by the API
# this sometimes happens when the PR was opened before the latest build release
new_icons_from_devicon_json = filehandler.find_new_icons_in_devicon_json(
devicon_json_path, icomoon_json_path)
for icon in new_icons_from_devicon_json:
if icon not in new_icons:
new_icons.append(icon)
return new_icons
def optimize_svgs(new_icons: List[str], icons_folder_path: str):
"""
Optimize the newly added svgs. This is done in batches
since the command line has a limit on characters allowed.
:param new_icons - the new icons that need to be optimized.
:param icons_folder_path - the path to the /icons folder.
"""
svgs = filehandler.get_svgs_paths(new_icons, icons_folder_path, icon_versions_only=False)
start = 0
step = 10
for i in range(start, len(svgs), step):
batch = svgs[i:i + step]
subprocess.run(["npm", "run", "optimize-svg", "--", f"--svgFiles={json.dumps(batch)}"], shell=True)
def update_icomoon_json(new_icons: List[str], icomoon_json_path: str):
"""
Update the `icomoon.json` if it contains any icons
that needed to be updated. This will remove the icons
from the `icomoon.json` so the build script will reupload
it later.
"""
icomoon_json = filehandler.get_json_file_content(icomoon_json_path)
cur_len = len(icomoon_json["icons"])
messages = []
wrapper_function = lambda icomoon_icon : find_icomoon_icon_not_in_new_icons(
icomoon_icon, new_icons, messages)
icons_to_keep = filter(wrapper_function, icomoon_json["icons"])
icomoon_json["icons"] = list(icons_to_keep)
new_len = len(icomoon_json["icons"])
print(f"Update completed. Removed {cur_len - new_len} icons:", *messages, sep='\n')
filehandler.write_to_file(icomoon_json_path, json.dumps(icomoon_json))
def find_icomoon_icon_not_in_new_icons(icomoon_icon: Dict, new_icons: List, messages: List):
"""
Find all the icomoon icons that are not listed in the new icons.
This also adds logging for which icons were removed.
:param icomoon_icon - a dict object from the icomoon.json's `icons` attribute.
:param new_icons - a list of new icons. Each element is an object from the `devicon.json`.
:param messages - an empty list where the function can attach logging on which
icons were removed.
"""
for new_icon in new_icons:
pattern = re.compile(f"^{new_icon['name']}-")
if pattern.search(icomoon_icon["properties"]["name"]):
message = f"-'{icomoon_icon['properties']['name']}' cause it matches '{new_icon['name']}'"
messages.append(message)
return False
return True
def get_release_message(token):
"""
Get the release message for the latest build and write
the result in a file.
:param token: the GitHub API token to access the API.
"""
# fetch first page by default
data = api_handler.get_merged_pull_reqs_since_last_release(token)
newIcons = []
features = []
print("Parsing through the pull requests...")
for pullData in data:
authors = api_handler.find_all_authors(pullData, token)
markdown = f"- [{pullData['title']}]({pullData['html_url']}) by {authors}."
if api_handler.is_feature_icon(pullData):
newIcons.append(markdown)
else:
features.append(markdown)
print("Constructing message...")
thankYou = "A huge thanks to all our maintainers and contributors for making this release possible!"
iconTitle = f"**{len(newIcons)} New Icons**"
featureTitle = f"**{len(features)} New Features**"
finalString = "{0}\n\n {1}\n{2}\n\n {3}\n{4}".format(thankYou,
iconTitle, "\n".join(newIcons),
featureTitle, "\n".join(features))
print("--------------Here is the build message--------------\n", finalString)
release_message_path = "./release_message.txt"
filehandler.write_to_file(release_message_path, finalString)
print("Script finished")
if __name__ == "__main__":
main()
|
py | 1a362c767b866fce0b5c875b44fb8860743fbdf0 | import rdkit
import rdkit.Chem as Chem
import numpy as np
import pandas as pd
import os
# import tensorflow as tf
elem_list = ['C', 'O', 'N', 'F', 'Br', 'Cl', 'S',
'Si', 'B', 'I', 'K', 'Na', 'P', 'Mg', 'Li', 'Al', 'H']
atom_fdim_geo = len(elem_list) + 6 + 6 + 6 + 1
bond_fdim_geo = 6
bond_fdim_qm = 25 + 40
max_nb = 10
qm_descriptors = None
def initialize_qm_descriptors(df=None, path=None):
global qm_descriptors
if path is not None:
qm_descriptors = pd.read_pickle(path).set_index('smiles')
elif df is not None:
qm_descriptors = df
def get_atom_classes():
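# Assign a unique integer token to every (element, degree, explicit valence,
# implicit valence) combination, enumerating the discrete atom classes.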
atom_classes = {}
token = 0
for e in elem_list: #element
for d in [0, 1, 2, 3, 4, 5]: #degree
for ev in [1, 2, 3, 4, 5, 6]: #explicit valence
for iv in [0, 1, 2, 3, 4, 5]: #implicit valence
atom_classes[str((e, d, ev, iv))] = token
token += 1
return atom_classes
def rbf_expansion(expanded, mu=0, delta=0.01, kmax=8):
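# Expand a scalar feature into kmax radial-basis values centred at
# mu, mu + delta, ..., mu + (kmax - 1) * delta.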
k = np.arange(0, kmax)
return np.exp(-(expanded - (mu + delta * k))**2 / delta)
def onek_encoding_unk(x, allowable_set):
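# One-hot encode x over allowable_set; unknown values fall back to the last entry.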
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
def atom_features(atom):
return np.array(onek_encoding_unk(atom.GetSymbol(), elem_list)
+ onek_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5])
+ onek_encoding_unk(atom.GetExplicitValence(), [1, 2, 3, 4, 5, 6])
+ onek_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5])
+ [atom.GetIsAromatic()], dtype=np.float32)
def bond_features(bond):
bt = bond.GetBondType()
return np.array(
[bt == Chem.rdchem.BondType.SINGLE, bt == Chem.rdchem.BondType.DOUBLE, bt == Chem.rdchem.BondType.TRIPLE,
bt == Chem.rdchem.BondType.AROMATIC, bond.GetIsConjugated(), bond.IsInRing()], dtype=np.float32)
def _mol2graph(rs, selected_descriptors, core=[]):
atom_fdim_qm = 50 * len(selected_descriptors)
mol_rs = Chem.MolFromSmiles(rs)
if not mol_rs:
raise ValueError("Could not parse smiles string:", rs)
fatom_index = {a.GetIntProp('molAtomMapNumber') - 1: a.GetIdx() for a in mol_rs.GetAtoms()}
fbond_index = {'{}-{}'.format(*sorted([b.GetBeginAtom().GetIntProp('molAtomMapNumber') - 1,
b.GetEndAtom().GetIntProp('molAtomMapNumber') - 1])): b.GetIdx()
for b in mol_rs.GetBonds()}
n_atoms = mol_rs.GetNumAtoms()
n_bonds = max(mol_rs.GetNumBonds(), 1)
fatoms_geo = np.zeros((n_atoms, atom_fdim_geo))
fatoms_qm = np.zeros((n_atoms, atom_fdim_qm))
fbonds_geo = np.zeros((n_bonds, bond_fdim_geo))
fbonds_qm = np.zeros((n_bonds, bond_fdim_qm))
atom_nb = np.zeros((n_atoms, max_nb), dtype=np.int32)
bond_nb = np.zeros((n_atoms, max_nb), dtype=np.int32)
num_nbs = np.zeros((n_atoms,), dtype=np.int32)
core_mask = np.zeros((n_atoms,), dtype=np.int32)
for smiles in rs.split('.'):
mol = Chem.MolFromSmiles(smiles)
fatom_index_mol = {a.GetIntProp('molAtomMapNumber') - 1: a.GetIdx() for a in mol.GetAtoms()}
qm_series = qm_descriptors.loc[smiles]
partial_charge = qm_series['partial_charge'].reshape(-1, 1)
partial_charge = np.apply_along_axis(rbf_expansion, -1, partial_charge, -2.0, 0.06, 50)
fukui_elec = qm_series['fukui_elec'].reshape(-1, 1)
fukui_elec = np.apply_along_axis(rbf_expansion, -1, fukui_elec, 0, 0.02, 50)
fukui_neu = qm_series['fukui_neu'].reshape(-1, 1)
fukui_neu = np.apply_along_axis(rbf_expansion, -1, fukui_neu, 0, 0.02, 50)
nmr = qm_series['NMR'].reshape(-1, 1)
nmr = np.apply_along_axis(rbf_expansion, -1, nmr, 0.0, 0.06, 50)
bond_index = np.expand_dims(qm_series['bond_order_matrix'], -1)
bond_index = np.apply_along_axis(rbf_expansion, -1, bond_index, 0.5, 0.1, 25)
bond_distance = np.expand_dims(qm_series['distance_matrix'], -1)
bond_distance = np.apply_along_axis(rbf_expansion, -1, bond_distance, 0.5, 0.05, 40)
selected_descriptors = set(selected_descriptors)
if selected_descriptors == {"partial_charge", "fukui_elec", "fukui_neu", "nmr"}:
atom_qm_descriptor = np.concatenate([partial_charge, fukui_elec, fukui_neu, nmr], axis=-1)
elif selected_descriptors == {"partial_charge", "nmr"}:
atom_qm_descriptor = np.concatenate([partial_charge, nmr], axis=-1)
elif selected_descriptors == {"fukui_elec", "fukui_neu"}:
atom_qm_descriptor = np.concatenate([fukui_elec, fukui_neu], axis=-1)
elif selected_descriptors == {"only_bonds"}:
atom_qm_descriptor = partial_charge
for map_idx in fatom_index_mol:
fatoms_geo[fatom_index[map_idx], :] = atom_features(mol_rs.GetAtomWithIdx(fatom_index[map_idx]))
fatoms_qm[fatom_index[map_idx], :] = atom_qm_descriptor[fatom_index_mol[map_idx], :]
if fatom_index[map_idx] in core:
core_mask[fatom_index[map_idx]] = 1
for bond in mol.GetBonds():
a1i, a2i = bond.GetBeginAtom().GetIntProp('molAtomMapNumber'), \
bond.GetEndAtom().GetIntProp('molAtomMapNumber')
idx = fbond_index['{}-{}'.format(*sorted([a1i-1, a2i-1]))]
a1 = fatom_index[a1i-1]
a2 = fatom_index[a2i-1]
a1i = fatom_index_mol[a1i-1]
a2i = fatom_index_mol[a2i-1]
if num_nbs[a1] == max_nb or num_nbs[a2] == max_nb:
raise Exception(smiles)
atom_nb[a1, num_nbs[a1]] = a2
atom_nb[a2, num_nbs[a2]] = a1
bond_nb[a1, num_nbs[a1]] = idx
bond_nb[a2, num_nbs[a2]] = idx
num_nbs[a1] += 1
num_nbs[a2] += 1
fbonds_geo[idx, :] = bond_features(bond)
fbonds_qm[idx, :25] = bond_index[a1i, a2i]
fbonds_qm[idx, 25:] = bond_distance[a1i, a2i]
return fatoms_geo, fatoms_qm, fbonds_qm, atom_nb, bond_nb, num_nbs, core_mask
def smiles2graph_pr(r_smiles, p_smiles, selected_descriptors=["partial_charge", "fukui_elec", "fukui_neu", "nmr"],
core_buffer=0):
rs, rs_core, p_core = _get_reacting_core(r_smiles, p_smiles, core_buffer)
rs_features = _mol2graph(r_smiles, selected_descriptors, core=rs_core)
return rs_features, r_smiles
def _get_reacting_core(rs, p, buffer):
'''
use molAtomMapNumber of molecules
buffer: neighbors to be considered as part of the reacting center
return: atomidx of reacting core
'''
r_mols = Chem.MolFromSmiles(rs)
p_mol = Chem.MolFromSmiles(p)
rs_dict = {a.GetIntProp('molAtomMapNumber'): a for a in r_mols.GetAtoms()}
p_dict = {a.GetIntProp('molAtomMapNumber'): a for a in p_mol.GetAtoms()}
rs_reactants = []
for r_smiles in rs.split('.'):
for a in Chem.MolFromSmiles(r_smiles).GetAtoms():
if a.GetIntProp('molAtomMapNumber') in p_dict:
rs_reactants.append(r_smiles)
break
rs_reactants = '.'.join(rs_reactants)
core_mapnum = set()
for a_map in p_dict:
# FIXME chiral change
# if str(p_dict[a_map].GetChiralTag()) != str(rs_dict[a_map].GetChiralTag()):
# core_mapnum.add(a_map)
a_neighbor_in_p = set([a.GetIntProp('molAtomMapNumber') for a in p_dict[a_map].GetNeighbors()])
a_neighbor_in_rs = set([a.GetIntProp('molAtomMapNumber') for a in rs_dict[a_map].GetNeighbors()])
if a_neighbor_in_p != a_neighbor_in_rs:
core_mapnum.add(a_map)
else:
for a_neighbor in a_neighbor_in_p:
b_in_p = p_mol.GetBondBetweenAtoms(p_dict[a_neighbor].GetIdx(), p_dict[a_map].GetIdx())
b_in_r = r_mols.GetBondBetweenAtoms(rs_dict[a_neighbor].GetIdx(), rs_dict[a_map].GetIdx())
if b_in_p.GetBondType() != b_in_r.GetBondType():
core_mapnum.add(a_map)
core_rs = _get_buffer(r_mols, [rs_dict[a].GetIdx() for a in core_mapnum], buffer)
core_p = _get_buffer(p_mol, [p_dict[a].GetIdx() for a in core_mapnum], buffer)
fatom_index = \
{a.GetIntProp('molAtomMapNumber') - 1: a.GetIdx() for a in Chem.MolFromSmiles(rs_reactants).GetAtoms()}
core_rs = [fatom_index[x] for x in core_rs]
core_p = [fatom_index[x] for x in core_p]
return rs_reactants, core_rs, core_p
def _get_buffer(m, cores, buffer):
neighbors = set(cores)
for i in range(buffer):
neighbors_temp = list(neighbors)
for c in neighbors_temp:
neighbors.update([n.GetIdx() for n in m.GetAtomWithIdx(c).GetNeighbors()])
neighbors = [m.GetAtomWithIdx(x).GetIntProp('molAtomMapNumber') - 1 for x in neighbors]
return neighbors
def pack2D(arr_list):
N = max([x.shape[0] for x in arr_list])
M = max([x.shape[1] for x in arr_list])
a = np.zeros((len(arr_list), N, M))
for i, arr in enumerate(arr_list):
n = arr.shape[0]
m = arr.shape[1]
a[i, 0:n, 0:m] = arr
return a
def pack2D_withidx(arr_list):
N = max([x.shape[0] for x in arr_list])
M = max([x.shape[1] for x in arr_list])
a = np.zeros((len(arr_list), N, M, 2))
for i, arr in enumerate(arr_list):
n = arr.shape[0]
m = arr.shape[1]
a[i, 0:n, 0:m, 0] = i
a[i, 0:n, 0:m, 1] = arr
return a
def pack1D(arr_list):
N = max([x.shape[0] for x in arr_list])
a = np.zeros((len(arr_list), N))
for i, arr in enumerate(arr_list):
n = arr.shape[0]
a[i, 0:n] = arr
return a
def get_mask(arr_list):
N = max([x.shape[0] for x in arr_list])
a = np.zeros((len(arr_list), N))
for i, arr in enumerate(arr_list):
for j in range(arr.shape[0]):
a[i][j] = 1
return a
def smiles2graph_list(smiles_list, idxfunc=lambda x: x.GetIdx()):
res = list(map(lambda x: smiles2graph(x, idxfunc), smiles_list))
fatom_list, fbond_list, gatom_list, gbond_list, nb_list = zip(*res)
return pack2D(fatom_list), pack2D(fbond_list), pack2D_withidx(gatom_list), pack2D_withidx(gbond_list), pack1D(
nb_list), get_mask(fatom_list)
def get_bond_edits(reactant_smi, product_smi):
reactants = Chem.MolFromSmiles(reactant_smi)
products = Chem.MolFromSmiles(product_smi)
conserved_maps = [a.GetAtomMapNum() for a in reactants.GetAtoms() if a.GetAtomMapNum()]
bond_changes = set()
bonds_prev = {}
for bond in reactants.GetBonds():
nums = sorted(
[bond.GetBeginAtom().GetAtomMapNum(), bond.GetEndAtom().GetAtomMapNum()])
bonds_prev['{}~{}'.format(nums[0], nums[1])] = bond.GetBondTypeAsDouble()
bonds_new = {}
for bond in products.GetBonds():
nums = sorted(
[bond.GetBeginAtom().GetAtomMapNum(), bond.GetEndAtom().GetAtomMapNum()])
if (nums[0] not in conserved_maps) or (nums[1] not in conserved_maps): continue
bonds_new['{}~{}'.format(nums[0], nums[1])] = bond.GetBondTypeAsDouble()
for bond in bonds_prev:
if bond not in bonds_new:
bond_changes.add((bond.split('~')[0], bond.split('~')[1], 0.0)) # lost bond
else:
if bonds_prev[bond] != bonds_new[bond]:
bond_changes.add((bond.split('~')[0], bond.split('~')[1], bonds_new[bond])) # changed bond
for bond in bonds_new:
if bond not in bonds_prev:
bond_changes.add((bond.split('~')[0], bond.split('~')[1], bonds_new[bond])) # new bond
return bond_changes
if __name__ == "__main__":
graph = smiles2graph_pr("[CH3:1][C@@H:2]([NH2:3])[CH2:4][Cl:5].[F-:6]", "[3, 4, 1]")
|
py | 1a362e72deee45c7c3789b7b386802f450307efa | from manim import *
from tests.test_graphical_units.testing.frames_comparison import frames_comparison
__module_test__ = "geometry"
@frames_comparison(last_frame=True)
def test_Coordinates(scene):
dots = [Dot(np.array([x, y, 0])) for x in range(-7, 8) for y in range(-4, 5)]
scene.add(VGroup(*dots))
@frames_comparison
def test_Arc(scene):
a = Arc(radius=1, start_angle=PI)
scene.add(a)
@frames_comparison
def test_ArcBetweenPoints(scene):
a = ArcBetweenPoints(np.array([1, 1, 0]), np.array([2, 2, 0]))
scene.add(a)
@frames_comparison
def test_CurvedArrow(scene):
a = CurvedArrow(np.array([1, 1, 0]), np.array([2, 2, 0]))
scene.add(a)
@frames_comparison
def test_CustomDoubleArrow(scene):
a = DoubleArrow(
np.array([-1, -1, 0]),
np.array([1, 1, 0]),
tip_shape_start=ArrowCircleTip,
tip_shape_end=ArrowSquareFilledTip,
)
scene.add(a)
@frames_comparison
def test_Circle(scene):
circle = Circle()
scene.add(circle)
@frames_comparison
def test_CirclePoints(scene):
circle = Circle.from_three_points(LEFT, LEFT + UP, UP * 2)
scene.add(circle)
@frames_comparison
def test_Dot(scene):
dot = Dot()
scene.add(dot)
@frames_comparison
def test_DashedVMobject(scene):
circle = DashedVMobject(Circle(), 12, 0.9)
line = DashedLine(dash_length=0.5)
scene.add(circle, line)
@frames_comparison
def test_AnnotationDot(scene):
adot = AnnotationDot()
scene.add(adot)
@frames_comparison
def test_Ellipse(scene):
e = Ellipse()
scene.add(e)
@frames_comparison
def test_Sector(scene):
e = Sector()
scene.add(e)
@frames_comparison
def test_Annulus(scene):
a = Annulus()
scene.add(a)
@frames_comparison
def test_AnnularSector(scene):
a = AnnularSector()
scene.add(a)
@frames_comparison
def test_Line(scene):
a = Line(np.array([1, 1, 0]), np.array([2, 2, 0]))
scene.add(a)
@frames_comparison
def test_Elbow(scene):
a = Elbow()
scene.add(a)
@frames_comparison
def test_DoubleArrow(scene):
a = DoubleArrow()
scene.add(a)
@frames_comparison
def test_Vector(scene):
a = Vector(UP)
scene.add(a)
@frames_comparison
def test_Polygon(scene):
a = Polygon(*[np.array([1, 1, 0]), np.array([2, 2, 0]), np.array([2, 3, 0])])
scene.add(a)
@frames_comparison
def test_Rectangle(scene):
a = Rectangle()
scene.add(a)
@frames_comparison
def test_RoundedRectangle(scene):
a = RoundedRectangle()
scene.add(a)
@frames_comparison
def test_Arrange(scene):
s1 = Square()
s2 = Square()
x = VGroup(s1, s2).set_x(0).arrange(buff=1.4)
scene.add(x)
@frames_comparison(last_frame=False)
def test_ZIndex(scene):
circle = Circle().set_fill(RED, opacity=1)
square = Square(side_length=1.7).set_fill(BLUE, opacity=1)
triangle = Triangle().set_fill(GREEN, opacity=1)
square.z_index = 0
triangle.z_index = 1
circle.z_index = 2
scene.play(FadeIn(VGroup(circle, square, triangle)))
scene.play(ApplyMethod(circle.shift, UP))
scene.play(ApplyMethod(triangle.shift, 2 * UP))
@frames_comparison
def test_Angle(scene):
l1 = Line(ORIGIN, RIGHT)
l2 = Line(ORIGIN, UP)
a = Angle(l1, l2)
scene.add(a)
@frames_comparison
def test_RightAngle(scene):
l1 = Line(ORIGIN, RIGHT)
l2 = Line(ORIGIN, UP)
a = RightAngle(l1, l2)
scene.add(a)
@frames_comparison
def test_Polygram(scene):
hexagram = Polygram(
[[0, 2, 0], [-np.sqrt(3), -1, 0], [np.sqrt(3), -1, 0]],
[[-np.sqrt(3), 1, 0], [0, -2, 0], [np.sqrt(3), 1, 0]],
)
scene.add(hexagram)
@frames_comparison
def test_RegularPolygram(scene):
pentagram = RegularPolygram(5, radius=2)
octagram = RegularPolygram(8, radius=2)
scene.add(VGroup(pentagram, octagram).arrange(RIGHT))
@frames_comparison
def test_Star(scene):
star = Star(outer_radius=2)
scene.add(star)
|
py | 1a3630dba4377e6c9069060ff461b91ce4d75af1 | #!/usr/bin/env python
# D. Jones - 1/10/14
"""This code is from the IDL Astronomy Users Library with
modifications from Dan Scolnic.
(adapted for IDL from DAOPHOT, then translated from IDL to Python).
Subroutine of GETPSF to perform a one-star least-squares fit,
part of the DAOPHOT PSF photometry sequence. This version requires
input noise and mask images.
CALLING SEQUENCE:
from PythonPhot import pkfit_noise as pkfit
pk = pkfit.pkfit_class(f, gauss, psf,
ronois, phpadu, noise_image, mask_image )
errmag,chi,sharp,niter,scale,xnew,ynew = pk.pkfit_noise(scale,x,y,sky,radius)
PKFIT CLASS INPUTS:
f - NX by NY array containing actual picture data.
ronois - readout noise per pixel, scalar
phpadu - photons per analog digital unit, scalar
gauss - vector containing the values of the five parameters defining
the analytic Gaussian which approximates the core of the PSF.
psf - an NPSF by NPSF look-up table containing corrections from
the Gaussian approximation of the PSF to the true PSF.
noise_image - the noise image corresponding to f
mask_image - the mask image corresponding to f. Masked pixels are not used.
PKFIT FUNCTION INPUTS:
x, y - the initial estimates of the centroid of the star relative
to the corner (0,0) of the subarray. Upon return, the
final computed values of X and Y will be passed back to the
calling routine.
sky - the local sky brightness value, as obtained from APER
radius - the fitting radius-- only pixels within RADIUS of the
instantaneous estimate of the star's centroid will be
included in the fit, scalar
OPTIONAL PKFIT FUNCTION INPUTS:
xyout - if True, return new x and y positions
maxiter - maximum iterations (default = 25)
INPUT-OUTPUT:
scale - the initial estimate of the brightness of the star,
expressed as a fraction of the brightness of the PSF.
Upon return, the final computed value of SCALE will be
passed back to the calling routine.
RETURNS:
errmag - the estimated standard error of the value of SCALE
returned by this routine.
chi - the estimated goodness-of-fit statistic: the ratio
of the observed pixel-to-pixel mean absolute deviation from
the profile fit, to the value expected on the basis of the
noise as determined from Poisson statistics and the
readout noise.
sharp - a goodness-of-fit statistic describing how much broader
the actual profile of the object appears than the
profile of the PSF.
niter - the number of iterations the solution required to achieve
convergence. If NITER = 25, the solution did not converge.
If for some reason a singular matrix occurs during the least-
squares solution, this will be flagged by setting NITER = -1.
EXAMPLE:
from astropy.io import fits as pyfits
from PythonPhot import pkfit_noise as pkfit
# read in the FITS images
image = pyfits.getdata(fits_filename)
noiseim = pyfits.getdata(fits_noise_filename)
maskim = pyfits.getdata(fits_mask_filename)
# read in the PSF image
psf = pyfits.getdata(psf_filename)
hpsf = pyfits.getheader(psf_filename)
gauss = [hpsf['GAUSS1'],hpsf['GAUSS2'],hpsf['GAUSS3'],hpsf['GAUSS4'],hpsf['GAUSS5']]
# x and y points for PSF fitting
xpos,ypos = np.array([1450,1400]),np.array([1550,1600])
# run 'aper' on x,y coords to get sky values
mag,magerr,flux,fluxerr,sky,skyerr,badflag,outstr = \
aper.aper(image,xpos,ypos,phpadu=1,apr=5,zeropoint=25,
skyrad=[40,50],badpix=[-12000,60000],exact=True)
# load the pkfit class
pk = pkfit.pkfit_class(image,gauss,psf,1,1,noiseim,maskim)
# do the PSF fitting
for x,y,s in zip(xpos,ypos,sky):
errmag,chi,sharp,niter,scale = \
pk.pkfit_noise(1,x,y,s,5)
flux = scale*10**(0.4*(25.-hpsf['PSFMAG']))
dflux = errmag*10**(0.4*(25.-hpsf['PSFMAG']))
print('PSF fit to coords %.2f,%.2f gives flux %s +/- %s'%(x,y,flux,dflux))
RESTRICTIONS:
No parameter checking is performed
REVISON HISTORY:
Adapted from the official DAO version of 1985 January 25
Version 2.0 W. Landsman STX November, 1988
Converted to IDL V5.0 W. Landsman September, 1997
Converted to Python D. Jones January, 2014
"""
import numpy as np
from numpy import sqrt
from scipy import linalg
from . import dao_value
sqrt,where,abs,shape,zeros,array,isnan,\
arange,matrix,exp,sum,isinf,median,ones,bool = \
np.sqrt,np.where,np.abs,np.shape,\
np.zeros,np.array,np.isnan,\
np.arange,np.matrix,np.exp,\
np.sum,np.isinf,np.median,np.ones,np.bool
class pkfit_class:
def __init__(self,image,gauss,psf,
ronois,phpadu,
noise_image,mask_image):
self.f = image
self.gauss = gauss
self.psf = psf
self.fnoise = noise_image
self.fmask = mask_image
self.ronois = ronois
self.phpadu = phpadu
def pkfit_noise(self,scale,x,y,sky,radius,
maxiter=25,
debug=False,debug2=False,
xyout = False):
f = self.f; gauss = self.gauss; psf = self.psf
fnoise = self.fnoise; fmask = self.fmask
if debug2:
import time
tstart = time.time()
if f.dtype != 'float64': f = f.astype('float64')
# psf1d = psf.reshape(shape(psf)[0]**2.)
s = shape(f) #Get array dimensions
nx = s[1] ; ny = s[0] #Initialize a few things for the solution
redo = 0
pkerr = 0.027/(gauss[3]*gauss[4])**2.
clamp = zeros(3) + 1.
dtold = zeros(3)
niter = 0
chiold = 1.
if debug:
print('PKFIT: ITER X Y SCALE ERRMAG CHI SHARP')
loop=True
while loop: #Begin the big least-squares loop
niter = niter+1
if isnan(x) or isnan(y):
scale=np.nan
errmag=np.nan
chi=np.nan
sharp=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
ixlo = int(x-radius)
if ixlo < 0: ixlo = 0 #Choose boundaries of subarray containing
iylo = int(y-radius)
if iylo < 0: iylo = 0 # points inside the fitting radius
ixhi = int(x+radius) +1
if ixhi > (nx-1): ixhi = nx-1
iyhi = int(y+radius) +1
if iyhi > ny-1: iyhi = ny-1
ixx = ixhi-ixlo+1
iyy = iyhi-iylo+1
dy = arange(iyy) + iylo - y #Y distance vector from stellar centroid
dysq = dy**2
dx = arange(ixx) + ixlo - x
dxsq = dx**2
rsq = zeros([iyy,ixx]) #RSQ - array of squared
for j in range(iyy): rsq[j,:] = (dxsq+dysq[j])/radius**2
# The fitting equation is of the form
#
# Observed brightness =
# SCALE + delta(SCALE) * PSF + delta(Xcen)*d(PSF)/d(Xcen) +
# delta(Ycen)*d(PSF)/d(Ycen)
#
# and is solved for the unknowns delta(SCALE) ( = the correction to
# the brightness ratio between the program star and the PSF) and
# delta(Xcen) and delta(Ycen) ( = corrections to the program star's
# centroid).
#
# The point-spread function is equal to the sum of the integral under
# a two-dimensional Gaussian profile plus a value interpolated from
# a look-up table.
# D. Jones - noise edit from Scolnic
good = where((rsq < 1.) &
(fnoise[iylo:iyhi+1,ixlo:ixhi+1] > 0) &
(fmask[iylo:iyhi+1,ixlo:ixhi+1] == 0))
ngood = len(good[0])
if ngood < 1: ngood = 1
t = zeros([3,ngood])
if not len(good[0]):
scale=np.nan
errmag=np.nan
chi=np.nan
sharp=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
dx = dx[good[1]]
dy = dy[good[0]]
model,dvdx,dvdy = dao_value.dao_value(dx, dy, gauss,
psf, #psf1d=psf1d,
deriv=True)#,ps1d=True)
if debug:
print('model created ')
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
t[0,:] = model
sa=shape(dvdx)
if sa[0] > ngood or len(sa) == 0:
scale=0
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
t[1,:] = -scale*dvdx
t[2,:] = -scale*dvdy
fsub = f[iylo:iyhi+1,ixlo:ixhi+1]
fsub = fsub[good[0],good[1]]
# D. Jones - added for noise version from Scolnic
fsubnoise=fnoise[iylo:iyhi+1,ixlo:ixhi+1]
rsq = rsq[good[0],good[1]]
# D. Jones - noise addition from Scolnic
fsubnoise = fsubnoise[good[0],good[1]]
sig=fsubnoise
sigsq = fsubnoise**2.
# D. Jones - added for noise version from Scolnic
# Scolnic Added!!!
#
yx=zeros(1)
yx[0]=sky
skys=yx[0]
sky=skys
df = fsub - scale*model - sky #Residual of the brightness from the PSF fit
# The expected random error in the pixel is the quadratic sum of
# the Poisson statistics, plus the readout noise, plus an estimated
# error of 0.75% of the total brightness for the difficulty of flat-
# fielding and bias-correcting the chip, plus an estimated error of
# some fraction of the fourth derivative at the peak of the profile,
# to account for the difficulty of accurately interpolating within the
# point-spread function. The fourth derivative of the PSF is
# proportional to H/sigma**4 (sigma is the Gaussian width parameter for
# the stellar core); using the geometric mean of sigma(x) and sigma(y),
# this becomes H/ sigma(x)*sigma(y) **2. The ratio of the fitting
# error to this quantity is estimated from a good-seeing CTIO frame to
# be approximately 0.027 (see definition of PKERR above.)
fpos = (fsub-df) #Raw data - residual = model predicted intensity
fposrow = where(fpos < 0.)[0]
if len(fposrow): fpos[fposrow] = 0
# D. Jones - noise addition from Scolnic - but ronois is never referenced, so I've omitted this
# self.ronois=median(fsubnoise**2.-(fpos/self.phpadu + (0.0075*fpos)**2. + (pkerr*(fpos-skys))**2.))
# D. Jones - noise addition from Scolnic
sig=fsubnoise
sigsq = fsubnoise**2
relerr = df/sig
# SIG is the anticipated standard error of the intensity
# including readout noise, Poisson photon statistics, and an estimate
# of the standard error of interpolating within the PSF.
rhosq = zeros([iyy,ixx])
for j in range(iyy): rhosq[j,:] = (dxsq/gauss[3]**2+dysq[j]/gauss[4]**2)
rhosq = rhosq[good[0],good[1]]
badflag = False
if niter >= 2: #Reject any pixel with 10 sigma residual
badpix = where( abs(relerr/chiold) >= 10. )[0]
nbad = len(badpix)
# scolnic added
sbd=shape(badpix)
sdf=shape(df)
if sbd[0] == sdf[0]:
scale=np.nan
errmag=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
if nbad > 0:
# D. Jones - to fix a bug in the original code
goodind = arange(len(rsq))
goodind = item_remove(badpix,goodind)
badflag = True
fsub = item_remove(badpix, fsub)
df = item_remove(badpix,df)
sigsq = item_remove(badpix,sigsq)
sig = item_remove(badpix,sig)
relerr = item_remove(badpix,relerr)
rsq = item_remove(badpix,rsq)
rhosq = item_remove(badpix,rhosq)
fsubnoise = item_remove(badpix,fsubnoise)
ngood = ngood - nbad
wt = 5./(5.+rsq/(1.-rsq))
lilrho = where(rhosq <= 36.)[0] #Include only pixels within 6 sigma of centroid
if not len(lilrho):
scale=np.nan
errmag=np.nan
sharp=np.nan
chi=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
rhosq[lilrho] = 0.5*rhosq[lilrho]
dfdsig = exp(-rhosq[lilrho])*(rhosq[lilrho]-1.)
fpos = fsub[lilrho]
fposrow = where(fsub[lilrho]-sky < 0.)[0]
fpos[fposrow] = sky
# FPOS-SKY = raw data minus sky = estimated value of the stellar
# intensity (which presumably is non-negative).
# sig = fpos/self.phpadu + self.ronois + (0.0075*fpos)**2 + (pkerr*(fpos-sky))**2
# D. Jones - noise addition from Scolnic
sig = fsubnoise[lilrho]**2
numer = sum(dfdsig*df[lilrho]/sig)
denom = sum(dfdsig**2/sig)
# Derive the weight of this pixel. First of all, the weight depends
# upon the distance of the pixel from the centroid of the star-- it
# is determined from a function which is very nearly unity for radii
# much smaller than the fitting radius, and which goes to zero for
# radii very near the fitting radius.
chi = sum(wt*abs(relerr))
sumwt = sum(wt)
wt = wt/sigsq #Scale weight to inverse square of expected mean error
if niter >= 2: #Reduce weight of a bad pixel
wt = wt/(1.+(0.4*relerr/chiold)**8)
v = zeros(3) #Compute vector of residuals and the normal matrix.
c = zeros([3,3])
if not badflag:
for kk in range(3):
v[kk] = sum(df*t[kk,:]*wt)
for ll in range(3): c[ll,kk] = sum(t[kk,:]*t[ll,:]*wt)
else:
for kk in range(3):
v[kk] = sum(df*t[kk,goodind]*wt)
for ll in range(3): c[ll,kk] = sum(t[kk,goodind]*t[ll,goodind]*wt)
# Compute the (robust) goodness-of-fit index CHI.
# CHI is pulled toward its expected value of unity before being stored
# in CHIOLD to keep the statistics of a small number of pixels from
# completely dominating the error analysis.
if sumwt > 3.0:
chi = 1.2533*chi*sqrt(1./(sumwt*(sumwt-3.)))
chiold = ((sumwt-3.)*chi+3.)/sumwt
if not isnan(sum(c)):
try:
c = linalg.inv(c) #Invert the normal matrix
except:
scale=np.nan
errmag=np.nan
chi=np.nan
sharp=np.nan
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
dt = matrix(v)*c #Compute parameter corrections
dt = array(dt)[0]
# In the beginning, the brightness of the star will not be permitted
# to change by more than two magnitudes per iteration (that is to say,
# if the estimate is getting brighter, it may not get brighter by
# more than 525% per iteration, and if it is getting fainter, it may
# not get fainter by more than 84% per iteration). The x and y
# coordinates of the centroid will be allowed to change by no more
# than one-half pixel per iteration. Any time that a parameter
# correction changes sign, the maximum permissible change in that
# parameter will be reduced by a factor of 2.
div = where( dtold*dt < -1.e-38)[0]
nbad = len(div)
if nbad > 0: clamp[div] = clamp[div]/2.
dtold = dt
adt = abs(dt)
denom2 = ( dt[0]/(5.25*scale))
if denom2 < (-1*dt[0]/(0.84*scale)): denom2 = (-1*dt[0]/(0.84*scale))
scale = scale+dt[0]/(1 + denom2/clamp[0])
x = x + dt[1]/(1.+adt[1]/(0.5*clamp[1]))
y = y + dt[2]/(1.+adt[2]/(0.5*clamp[2]))
redo = 0
# Convergence criteria: if the most recent computed correction to the
# brightness is larger than 0.1% or than 0.05 * sigma(brightness),
# whichever is larger, OR if the absolute change in X or Y is
# greater than 0.01 pixels, convergence has not been achieved.
sharp = 2.*gauss[3]*gauss[4]*numer/(gauss[0]*scale*denom)
errmag = chiold*sqrt(c[0,0])
if ( adt[0] > 0.05*errmag) or (adt[0] > 0.001*scale): redo = 1
if (adt[1] > 0.01) or (adt[2] > 0.01): redo = 1
if debug: print(niter,x,y,scale,errmag,chiold,sharp)
if niter >= 3: loop=False #At least 3 iterations required
# If the solution has gone 25 iterations, OR if the standard error of
# the brightness is greater than 200%, give up.
if (redo and (errmag <= 1.9995) and (niter < maxiter) ): loop=True
# if sharp < -99.999: sharp = -99.999
# elif sharp > 99.999: sharp = 99.999
if xyout:
return(errmag,chi,sharp,niter,scale,x,y)
else:
return(errmag,chi,sharp,niter,scale)
def item_remove(index,array):
mask = ones(array.shape,dtype=bool)
mask[index] = False
smaller_array = array[mask]
return(smaller_array)
|
py | 1a36310f204780380b19ba14c72e95b6b7dfe2c1 | import tensorflow as tf
data_path = 'train.tfrecord'
with tf.Session() as sess:
feature = {"image_raw": tf.FixedLenFeature([], tf.string),
"label": tf.FixedLenFeature([], tf.int64)}
# Create a list of filenames and pass it to a queue
filename_queue = tf.train.string_input_producer([data_path], num_epochs=1)
# Define a reader and read the next record
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
# Decode the record read by the reader
features = tf.parse_single_example(serialized_example, features=feature)
# Convert the image data from string back to the numbers
image = tf.decode_raw(features["image_raw"], tf.float32)
# Cast label data into int32
label = tf.cast(features["label"], tf.int32)
# Reshape image data into the original shape
image = tf.reshape(image, [224, 224, 3])
# Any preprocessing here ...
# Creates batches by randomly shuffling tensors
images, labels = tf.train.shuffle_batch(
[image, label], batch_size=10, capacity=30, num_threads=1, min_after_dequeue=10)
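# --- A hedged completion (not in the original snippet): the graph above is
# built but never evaluated. A minimal consumption loop, assuming the shapes
# and dtypes declared above are correct, might look like this. ---
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())  # needed because num_epochs creates a local variable
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
try:
    while not coord.should_stop():
        img_batch, lbl_batch = sess.run([images, labels])
        print(img_batch.shape, lbl_batch)
except tf.errors.OutOfRangeError:
    print("Finished reading all records")
finally:
    coord.request_stop()
    coord.join(threads)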
|
py | 1a3631d8960d1b8e8d9e16ff1d87613f0eb7bdde | from main_algorithm import MainAlgorithm
m = MainAlgorithm()
from api import Matrices
a = Matrices()
a.produce("PD")
|
py | 1a3632051efc44f1842e5d453c7350f7cb967ea5 | import os
SECRET_KEY = os.environ.get("SECRET_KEY", None)
|
py | 1a36326789317324ed6fb2d107ef2431a7ab8941 | import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import chia.server.ws_connection as ws # lgtm [py/import-and-import-from]
from chia.consensus.coinbase import create_puzzlehash_for_pk
from chia.consensus.constants import ConsensusConstants
from chia.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chia.pools.pool_config import PoolWalletConfig, load_pool_config
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.server.ws_connection import WSChiaConnection
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.bech32m import decode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config, save_config, config_path_for_filename
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint16, uint32, uint64
from chia.util.keychain import Keychain
from chia.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from chia.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
log = logging.getLogger(__name__)
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300
UPDATE_HARVESTER_CACHE_INTERVAL: int = 90
"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class HarvesterCacheEntry:
def __init__(self):
self.data: Optional[dict] = None
self.last_update: float = 0
def bump_last_update(self):
self.last_update = time.time()
def set_data(self, data):
self.data = data
self.bump_last_update()
def needs_update(self, update_interval: int):
return time.time() - self.last_update > update_interval
def expired(self, update_interval: int):
return time.time() - self.last_update > update_interval * 10
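# Illustrative lifecycle of a cache entry (values are examples, not taken from the code):
#   entry = HarvesterCacheEntry()
#   entry.set_data({"plots": []})   # stores the harvester response and bumps last_update
#   entry.needs_update(90)          # False immediately after an update
#   entry.expired(90)               # True only after ~900s without any update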
class Farmer:
def __init__(
self,
root_path: Path,
farmer_config: Dict,
pool_config: Dict,
consensus_constants: ConsensusConstants,
local_keychain: Optional[Keychain] = None,
):
self.keychain_proxy: Optional[KeychainProxy] = None
self.local_keychain = local_keychain
self._root_path = root_path
self.config = farmer_config
self.pool_config = pool_config
# Keep track of all sps, keyed on challenge chain signage point hash
self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
# Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
# Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
# number of responses to each signage point
self.number_of_responses: Dict[bytes32, int] = {}
# A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
# to periodically clear the memory
self.cache_add_time: Dict[bytes32, uint64] = {}
# Interval to request plots from connected harvesters
self.update_harvester_cache_interval = UPDATE_HARVESTER_CACHE_INTERVAL
self.cache_clear_task: asyncio.Task
self.update_pool_state_task: asyncio.Task
self.constants = consensus_constants
self._shut_down = False
self.server: Any = None
self.state_changed_callback: Optional[Callable] = None
self.log = log
async def ensure_keychain_proxy(self) -> KeychainProxy:
if not self.keychain_proxy:
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(self._root_path, self.log)
if not self.keychain_proxy:
raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
return self.keychain_proxy
async def get_all_private_keys(self):
keychain_proxy = await self.ensure_keychain_proxy()
return await keychain_proxy.get_all_private_keys()
async def setup_keys(self):
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
master_sk_to_pool_sk(sk) for sk in self.all_root_sks
]
if len(self.get_public_keys()) == 0:
error_str = "No keys exist. Please run 'chia keys generate' or open the UI."
raise RuntimeError(error_str)
# This is the farmer configuration
self.farmer_target_encoded = self.config["xfl_target_address"]
self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)
self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]
# This is the self pooling configuration, which is only used for original self-pooled plots
self.pool_target_encoded = self.pool_config["xfl_target_address"]
self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
self.pool_sks_map: Dict = {}
for key in self.get_private_keys():
self.pool_sks_map[bytes(key.get_g1())] = key
assert len(self.farmer_target) == 32
assert len(self.pool_target) == 32
if len(self.pool_sks_map) == 0:
error_str = "No keys exist. Please run 'chia keys generate' or open the UI."
raise RuntimeError(error_str)
# The variables below are for use with an actual pool
# From p2_singleton_puzzle_hash to pool state dict
self.pool_state: Dict[bytes32, Dict] = {}
# From public key bytes to PrivateKey
self.authentication_keys: Dict[bytes, PrivateKey] = {}
# Last time we updated pool_state based on the config file
self.last_config_access_time: uint64 = uint64(0)
self.harvester_cache: Dict[str, Dict[str, HarvesterCacheEntry]] = {}
async def _start(self):
await self.setup_keys()
self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
def _close(self):
self._shut_down = True
async def _await_closed(self):
await self.cache_clear_task
await self.update_pool_state_task
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def on_connect(self, peer: WSChiaConnection):
# Sends a handshake to the harvester
self.state_changed("add_connection", {})
handshake = harvester_protocol.HarvesterHandshake(
self.get_public_keys(),
self.pool_public_keys,
)
if peer.connection_type is NodeType.HARVESTER:
msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
await peer.send_message(msg)
def set_server(self, server):
self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
self.log.error(error_message)
self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
)
def on_disconnect(self, connection: ws.WSChiaConnection):
self.log.info(f"peer disconnected {connection.get_peer_logging()}")
self.state_changed("close_connection", {})
async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(
f"{pool_config.pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log)
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /pool_info response: {response}")
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
)
return None
async def _pool_get_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
) -> Optional[Dict]:
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
get_farmer_params = {
"launcher_id": pool_config.launcher_id.hex(),
"authentication_token": authentication_token,
"signature": bytes(signature).hex(),
}
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(
f"{pool_config.pool_url}/farmer",
params=get_farmer_params,
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_post_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_config.pool_url}/farmer",
json=post_farmer_request.to_json_dict(),
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"POST /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_put_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)
try:
async with aiohttp.ClientSession() as session:
async with session.put(
f"{pool_config.pool_url}/farmer",
json=put_farmer_request.to_json_dict(),
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"PUT /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
)
return None
async def update_pool_state(self):
config = load_config(self._root_path, "config.yaml")
pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
for pool_config in pool_config_list:
p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
try:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
if p2_singleton_puzzle_hash not in self.pool_state:
self.authentication_keys[bytes(pool_config.authentication_public_key)] = authentication_sk
self.pool_state[p2_singleton_puzzle_hash] = {
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
"points_acknowledged_24h": [],
"next_farmer_update": 0,
"next_pool_info_update": 0,
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"authentication_token_timeout": None,
}
self.log.info(f"Added pool: {pool_config}")
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
# Skip state update when self pooling
if pool_config.pool_url == "":
continue
enforce_https = config["full_node"]["selected_network"] == "mainnet"
if enforce_https and not pool_config.pool_url.startswith("https://"):
self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
continue
# TODO: Improve error handling below, inform about unexpected failures
if time.time() >= pool_state["next_pool_info_update"]:
# Makes a GET request to the pool to get the updated information
pool_info = await self._pool_get_pool_info(pool_config)
if pool_info is not None and "error_code" not in pool_info:
pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
# Only update the first time from GET /pool_info, gets updated from GET /farmer later
if pool_state["current_difficulty"] is None:
pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
if time.time() >= pool_state["next_farmer_update"]:
authentication_token_timeout = pool_state["authentication_token_timeout"]
async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[bool]]:
# Run a GET /farmer to see if the farmer is already known by the pool
response = await self._pool_get_farmer(
pool_config, authentication_token_timeout, authentication_sk
)
farmer_response: Optional[GetFarmerResponse] = None
farmer_known: Optional[bool] = None
if response is not None:
if "error_code" not in response:
farmer_response = GetFarmerResponse.from_json_dict(response)
if farmer_response is not None:
pool_state["current_difficulty"] = farmer_response.current_difficulty
pool_state["current_points"] = farmer_response.current_points
pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
else:
farmer_known = response["error_code"] != PoolErrorCode.FARMER_NOT_KNOWN.value
self.log.error(
"update_pool_farmer_info failed: "
f"{response['error_code']}, {response['error_message']}"
)
return farmer_response, farmer_known
if authentication_token_timeout is not None:
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and farmer_is_known is not None and not farmer_is_known:
# Make the farmer known on the pool with a POST /farmer
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
post_response = await self._pool_post_farmer(
pool_config, authentication_token_timeout, owner_sk
)
if post_response is not None and "error_code" not in post_response:
self.log.info(
f"Welcome message from {pool_config.pool_url}: "
f"{post_response['welcome_message']}"
)
# Now we should be able to update the local farmer info
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and not farmer_is_known:
self.log.error("Failed to update farmer info after POST /farmer.")
# Update the payout instructions on the pool if required
if (
farmer_info is not None
and pool_config.payout_instructions.lower() != farmer_info.payout_instructions.lower()
):
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
put_farmer_response_dict = await self._pool_put_farmer(
pool_config, authentication_token_timeout, owner_sk
)
try:
# put_farmer_response: PutFarmerResponse = PutFarmerResponse.from_json_dict(
# put_farmer_response_dict
# )
# if put_farmer_response.payout_instructions:
# self.log.info(
# f"Farmer information successfully updated on the pool {pool_config.pool_url}"
# )
# TODO: Fix Streamable implementation and recover the above.
if put_farmer_response_dict["payout_instructions"]:
self.log.info(
f"Farmer information successfully updated on the pool {pool_config.pool_url}"
)
else:
raise Exception
except Exception:
self.log.error(
f"Failed to update farmer information on the pool {pool_config.pool_url}"
)
else:
self.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
return self._private_keys
async def get_reward_targets(self, search_for_private_key: bool) -> Dict:
if search_for_private_key:
all_sks = await self.get_all_private_keys()
stop_searching_for_farmer, stop_searching_for_pool = False, False
for i in range(500):
if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
break
for sk, _ in all_sks:
ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())
if ph == self.farmer_target:
stop_searching_for_farmer = True
if ph == self.pool_target:
stop_searching_for_pool = True
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
"have_farmer_sk": stop_searching_for_farmer,
"have_pool_sk": stop_searching_for_pool,
}
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
}
def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
config = load_config(self._root_path, "config.yaml")
if farmer_target_encoded is not None:
self.farmer_target_encoded = farmer_target_encoded
self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
config["farmer"]["xfl_target_address"] = farmer_target_encoded
if pool_target_encoded is not None:
self.pool_target_encoded = pool_target_encoded
self.pool_target = decode_puzzle_hash(pool_target_encoded)
config["pool"]["xfl_target_address"] = pool_target_encoded
save_config(self._root_path, "config.yaml", config)
async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
if launcher_id == pool_state_dict["pool_config"].launcher_id:
config = load_config(self._root_path, "config.yaml")
new_list = []
for list_element in config["pool"]["pool_list"]:
if hexstr_to_bytes(list_element["launcher_id"]) == bytes(launcher_id):
list_element["payout_instructions"] = payout_instructions
new_list.append(list_element)
config["pool"]["pool_list"] = new_list
save_config(self._root_path, "config.yaml", config)
# Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
pool_state_dict["next_farmer_update"] = 0
return
self.log.warning(f"Launcher id: {launcher_id} not found")
async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
for pool_state in self.pool_state.values():
pool_config: PoolWalletConfig = pool_state["pool_config"]
if pool_config.launcher_id == launcher_id:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token_timeout = pool_state["authentication_token_timeout"]
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
return (
pool_config.pool_url
+ f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
f"&signature={bytes(signature).hex()}"
)
return None
async def update_cached_harvesters(self) -> bool:
# First remove outdated cache entries
self.log.debug(f"update_cached_harvesters cache entries: {len(self.harvester_cache)}")
remove_hosts = []
for host, host_cache in self.harvester_cache.items():
remove_peers = []
for peer_id, peer_cache in host_cache.items():
# If the peer cache has expired, the harvester has not responded for too long
if peer_cache.expired(self.update_harvester_cache_interval):
remove_peers.append(peer_id)
for key in remove_peers:
del host_cache[key]
if len(host_cache) == 0:
self.log.debug(f"update_cached_harvesters remove host: {host}")
remove_hosts.append(host)
for key in remove_hosts:
del self.harvester_cache[key]
# Now query each harvester and update caches
updated = False
for connection in self.server.get_connections(NodeType.HARVESTER):
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry.needs_update(self.update_harvester_cache_interval):
self.log.debug(f"update_cached_harvesters update harvester: {connection.peer_node_id}")
cache_entry.bump_last_update()
response = await connection.request_plots(
harvester_protocol.RequestPlots(), timeout=self.update_harvester_cache_interval
)
if response is not None:
if isinstance(response, harvester_protocol.RespondPlots):
new_data: Dict = response.to_json_dict()
if cache_entry.data != new_data:
updated = True
self.log.debug(f"update_cached_harvesters cache updated: {connection.peer_node_id}")
else:
self.log.debug(f"update_cached_harvesters no changes for: {connection.peer_node_id}")
cache_entry.set_data(new_data)
else:
self.log.error(
f"Invalid response from harvester:"
f"peer_host {connection.peer_host}, peer_node_id {connection.peer_node_id}"
)
else:
self.log.error(
f"Harvester '{connection.peer_host}/{connection.peer_node_id}' did not respond: "
f"(version mismatch or time out {UPDATE_HARVESTER_CACHE_INTERVAL}s)"
)
return updated
async def get_cached_harvesters(self, connection: WSChiaConnection) -> HarvesterCacheEntry:
host_cache = self.harvester_cache.get(connection.peer_host)
if host_cache is None:
host_cache = {}
self.harvester_cache[connection.peer_host] = host_cache
node_cache = host_cache.get(connection.peer_node_id.hex())
if node_cache is None:
node_cache = HarvesterCacheEntry()
host_cache[connection.peer_node_id.hex()] = node_cache
return node_cache
async def get_harvesters(self) -> Dict:
harvesters: List = []
for connection in self.server.get_connections(NodeType.HARVESTER):
self.log.debug(f"get_harvesters host: {connection.peer_host}, node_id: {connection.peer_node_id}")
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry.data is not None:
harvester_object: dict = dict(cache_entry.data)
harvester_object["connection"] = {
"node_id": connection.peer_node_id.hex(),
"host": connection.peer_host,
"port": connection.peer_port,
}
harvesters.append(harvester_object)
else:
self.log.debug(f"get_harvesters no cache: {connection.peer_host}, node_id: {connection.peer_node_id}")
return {"harvesters": harvesters}
async def _periodically_update_pool_state_task(self):
time_slept: uint64 = uint64(0)
config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
while not self._shut_down:
# Every time the config file changes, read it to check the pool state
stat_info = config_path.stat()
if stat_info.st_mtime > self.last_config_access_time:
# If we detect the config file changed, refresh private keys first just in case
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
self.last_config_access_time = stat_info.st_mtime
await self.update_pool_state()
time_slept = uint64(0)
elif time_slept > 60:
await self.update_pool_state()
time_slept = uint64(0)
time_slept += 1
await asyncio.sleep(1)
async def _periodically_clear_cache_and_refresh_task(self):
time_slept: uint64 = uint64(0)
refresh_slept = 0
while not self._shut_down:
try:
if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
now = time.time()
removed_keys: List[bytes32] = []
for key, add_time in self.cache_add_time.items():
if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
self.sps.pop(key, None)
self.proofs_of_space.pop(key, None)
self.quality_str_to_identifiers.pop(key, None)
self.number_of_responses.pop(key, None)
removed_keys.append(key)
for key in removed_keys:
self.cache_add_time.pop(key, None)
time_slept = uint64(0)
log.debug(
f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
)
time_slept += 1
refresh_slept += 1
# Periodically refresh GUI to show the correct download/upload rate.
if refresh_slept >= 30:
self.state_changed("add_connection", {})
refresh_slept = 0
# Handles harvester plots cache cleanup and updates
if await self.update_cached_harvesters():
self.state_changed("new_plots", await self.get_harvesters())
except Exception:
log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.format_exc()}")
await asyncio.sleep(1)
|
py | 1a36340a48fd45fb929e92a7d86076565cacb921 | import os
import tensorflow as tf
from datetime import datetime
import sys
sys.path.append('')
import helper
# Load the dataset
(train_images, train_labels), (test_images, test_labels) = helper.load_data()
# Normalize pixel values to the [0, 1] range
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define a model
def create_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
# Create a model
model = create_model()
# Display the model's architecture
model.summary()
# Train
logdir="SimpleANN/logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
model.fit(train_images,
train_labels,
epochs=10,
batch_size=32,
validation_data=(test_images, test_labels),
callbacks=[tensorboard_callback])
# Evaluate the model
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print("Accuracy: {:5.2f}%".format(100*acc))
# Save the model
model.save('SimpleANN/simple_ann_model.model')
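# A quick sanity check (not part of the original script): reload the saved model
# and confirm it reproduces the evaluation accuracy. The path matches save() above.
reloaded = tf.keras.models.load_model('SimpleANN/simple_ann_model.model')
loss2, acc2 = reloaded.evaluate(test_images, test_labels, verbose=2)
print("Reloaded model accuracy: {:5.2f}%".format(100 * acc2))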
|
py | 1a36342a5478d7af0c749fa6de2916ee2f66e9b6 | """create tables
Revision ID: 5eabe39be597
Revises:
Create Date: 2021-12-25 02:07:37.870336
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "5eabe39be597"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.execute(
"""
CREATE TYPE EmployeeStatus AS ENUM ('works', 'blacklist', 'fired');
CREATE TYPE TaskPriority AS ENUM ('minor', 'normal', 'major', 'hot');
CREATE TYPE TaskStatus AS ENUM ('done', 'in_progress', 'selected_for_development');
CREATE TYPE LeaveType AS ENUM ('vacation', 'sick_leave');
CREATE TYPE PhoneStatus AS ENUM ('active', 'deprecated');
CREATE TYPE LeaveStatus AS ENUM ('confirmed', 'rejected', 'pending');
CREATE TABLE IF NOT EXISTS employees(
id serial PRIMARY KEY,
first_name VARCHAR(255) NOT NULL,
middle_name VARCHAR(255),
last_name VARCHAR(255) NOT NULL,
info TEXT,
date_of_birth DATE,
hired_on DATE NOT NULL,
fired_on DATE,
status EmployeeStatus NOT NULL DEFAULT 'works'
);
CREATE TABLE IF NOT EXISTS tasks(
id serial PRIMARY KEY,
title VARCHAR(255) NOT NULL,
description TEXT,
priority TaskPriority NOT NULL DEFAULT 'normal',
status TaskStatus NOT NULL DEFAULT 'selected_for_development',
assignee_id INT,
FOREIGN KEY (assignee_id) REFERENCES employees(id),
reporter_id INT,
FOREIGN KEY (reporter_id) REFERENCES employees(id),
created_at timestamp,
employee_id INT NOT NULL,
FOREIGN KEY (employee_id) REFERENCES employees(id)
);
CREATE TABLE IF NOT EXISTS links(
id serial PRIMARY KEY,
name VARCHAR(255),
link VARCHAR(511),
employee_id INT NOT NULL,
FOREIGN KEY (employee_id) REFERENCES employees(id)
);
CREATE TABLE IF NOT EXISTS leaves(
id serial PRIMARY KEY,
leave_type LeaveType NOT NULL DEFAULT 'vacation',
start_date DATE NOT NULL,
end_date DATE NOT NULL,
status LeaveStatus NOT NULL DEFAULT 'pending',
approved_by INT,
FOREIGN KEY (approved_by) REFERENCES employees(id),
requested_at TIMESTAMP NOT NULL,
approved_at TIMESTAMP,
employee_id INT NOT NULL,
FOREIGN KEY (employee_id) REFERENCES employees(id)
);
CREATE TABLE IF NOT EXISTS phone_numbers(
id serial PRIMARY KEY,
phone VARCHAR(255) ,
status PhoneStatus NOT NULL DEFAULT 'active',
employee_id INT NOT NULL,
FOREIGN KEY (employee_id) REFERENCES employees(id)
);
"""
)
def downgrade():
pass
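# The downgrade is left as a no-op above; a minimal symmetric sketch (an
# assumption, not part of the original migration) would be:
#
#     op.execute(
#         """
#         DROP TABLE IF EXISTS phone_numbers, leaves, links, tasks, employees;
#         DROP TYPE IF EXISTS PhoneStatus, LeaveStatus, LeaveType, TaskStatus, TaskPriority, EmployeeStatus;
#         """
#     )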
|
py | 1a36353ea1560f17a6c606d4c3fb5023ff38abd1 | """The base command."""
class Base(object):
"""A base command."""
def __init__(self, options, *args, **kwargs):
self.options = options
self.args = args
self.kwargs = kwargs
def run(self):
raise NotImplementedError('Sorry, I cannot do that!')
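# A minimal sketch of a concrete command built on Base (illustrative only; the
# class name and behaviour below are assumptions, not part of the original module):
class Hello(Base):
    """Say hello, world!"""
    def run(self):
        print('Hello, world!')
        print('You supplied the following options:', self.options)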
|
py | 1a36356c8305c94bac483f36a979fa733ecbb117 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from .networks.mobilenet import MobileNetV3
def create_model(heads, head_conv):
model = MobileNetV3(heads, final_kernel=1, head_conv=head_conv)
return model
def load_model(model, model_path):
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
state_dict_ = checkpoint['state_dict']
state_dict = {}
# convert data_parallal to model
for k in state_dict_:
if k.startswith('module') and not k.startswith('module_list'):
state_dict[k[7:]] = state_dict_[k]
else:
state_dict[k] = state_dict_[k]
model_state_dict = model.state_dict()
# check loaded parameters and created model parameters
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
print('Skip loading parameter {}, required shape{}, '\
'loaded shape{}.'.format(
k, model_state_dict[k].shape, state_dict[k].shape))
state_dict[k] = model_state_dict[k]
else:
print('Drop parameter {}.'.format(k))
for k in model_state_dict:
if not (k in state_dict):
print('No param {}.'.format(k))
state_dict[k] = model_state_dict[k]
model.load_state_dict(state_dict, strict=False)
return model
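# Example usage (a sketch; the heads layout and checkpoint filename below are
# assumptions for illustration, not part of this module):
#
#     heads = {'hm': 80, 'wh': 2, 'reg': 2}   # CenterNet-style output heads
#     model = create_model(heads, head_conv=64)
#     model = load_model(model, 'model_last.pth')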
|
py | 1a3636e10072455989a9b939270c793f41cdf8e3 | import sys
import os
import nbsphinx
import re
# credit to https://github.com/rodluger/starry_process
# Add the CWD to the path
sys.path.insert(1, os.path.dirname(os.path.abspath(__file__)))
# Hack `nbsphinx` to enable us to hide certain input cells in the
# jupyter notebooks. This works with nbsphinx==0.5.0
nbsphinx.RST_TEMPLATE = nbsphinx.RST_TEMPLATE.replace(
"{% block input -%}",
'{% block input -%}\n{%- if not "hide_input" in cell.metadata.tags %}',
)
nbsphinx.RST_TEMPLATE = nbsphinx.RST_TEMPLATE.replace(
"{% endblock input %}", "{% endif %}\n{% endblock input %}"
)
# Hack `nbsphinx` to prevent fixed-height images, which look
# terrible when the window is resized!
nbsphinx.RST_TEMPLATE = re.sub(
r"\{%- if height %\}.*?{% endif %}",
"",
nbsphinx.RST_TEMPLATE,
flags=re.DOTALL,
)
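# Usage note (an assumption about intent, not part of this config): the first
# hack above looks for a "hide_input" entry in a cell's metadata tags, so a
# notebook cell with metadata {"tags": ["hide_input"]} will have its input
# omitted from the rendered page while its output is kept.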
|
py | 1a363713cff3b0c50f28d99782509c712ebb2dd4 | #!/usr/bin/env python
import tensorflow as tf
import math
import os
import numpy as np
# Define parameters
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epoch_number', None, 'Number of epochs to run trainer.')
flags.DEFINE_integer("batch_size", 1024,
"indicates batch size in a single gpu, default is 1024")
flags.DEFINE_integer("thread_number", 1, "Number of thread to read data")
flags.DEFINE_integer("min_after_dequeue", 100,
"indicates min_after_dequeue of shuffle queue")
flags.DEFINE_string("output_dir", "./tensorboard/",
"indicates training output")
flags.DEFINE_string("model", "deep",
"Model to train, option model: deep, linear")
flags.DEFINE_string("optimizer", "sgd", "optimizer to import")
flags.DEFINE_integer('hidden1', 10, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 20, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('steps_to_validate', 10,
'Steps to validate and print loss')
flags.DEFINE_string("mode", "train",
"Option mode: train, train_from_scratch, inference")
# For distributed
tf.app.flags.DEFINE_string("ps_hosts", "",
"Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("worker_hosts", "",
"Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("job_name", "", "One of 'ps', 'worker'")
tf.app.flags.DEFINE_integer("task_index", 0, "Index of task within the job")
# Hyperparameters
learning_rate = FLAGS.learning_rate
epoch_number = FLAGS.epoch_number
thread_number = FLAGS.thread_number
batch_size = FLAGS.batch_size
min_after_dequeue = FLAGS.min_after_dequeue
capacity = thread_number * batch_size + min_after_dequeue
FEATURE_SIZE = 9
# Read serialized examples from filename queue
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
"label": tf.FixedLenFeature([], tf.float32),
"features": tf.FixedLenFeature([FEATURE_SIZE], tf.float32),
})
label = features["label"]
features = features["features"]
return label, features
def main(_):
ps_hosts = FLAGS.ps_hosts.split(",")
worker_hosts = FLAGS.worker_hosts.split(",")
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
server = tf.train.Server(cluster,
job_name=FLAGS.job_name,
task_index=FLAGS.task_index)
if FLAGS.job_name == "ps":
server.join()
elif FLAGS.job_name == "worker":
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:%d" % FLAGS.task_index,
cluster=cluster)):
# Read TFRecords files
filename_queue = tf.train.string_input_producer(
tf.train.match_filenames_once("../data/cancer/cancer_train.csv.tfrecords"),
num_epochs=epoch_number)
label, features = read_and_decode(filename_queue)
batch_labels, batch_features = tf.train.shuffle_batch(
[label, features],
batch_size=batch_size,
num_threads=thread_number,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
validate_filename_queue = tf.train.string_input_producer(
tf.train.match_filenames_once(
"../data/cancer/cancer_test.csv.tfrecords"),
num_epochs=epoch_number)
validate_label, validate_features = read_and_decode(
validate_filename_queue)
validate_batch_labels, validate_batch_features = tf.train.shuffle_batch(
[validate_label, validate_features],
batch_size=batch_size,
num_threads=thread_number,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
# Define the model
input_units = FEATURE_SIZE
hidden1_units = FLAGS.hidden1
hidden2_units = FLAGS.hidden2
output_units = 2
# Hidden 1
weights1 = tf.Variable(
tf.truncated_normal([input_units, hidden1_units]),
dtype=tf.float32,
name='weights')
biases1 = tf.Variable(
tf.truncated_normal([hidden1_units]),
name='biases',
dtype=tf.float32)
hidden1 = tf.nn.relu(tf.matmul(batch_features, weights1) + biases1)
# Hidden 2
weights2 = tf.Variable(
tf.truncated_normal([hidden1_units, hidden2_units]),
dtype=tf.float32,
name='weights')
biases2 = tf.Variable(
tf.truncated_normal([hidden2_units]),
name='biases',
dtype=tf.float32)
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights2) + biases2)
# Linear
weights3 = tf.Variable(
tf.truncated_normal([hidden2_units, output_units]),
dtype=tf.float32,
name='weights')
biases3 = tf.Variable(
tf.truncated_normal([output_units]),
name='biases',
dtype=tf.float32)
logits = tf.matmul(hidden2, weights3) + biases3
batch_labels = tf.to_int64(batch_labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=batch_labels)
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
if FLAGS.optimizer == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)  # MomentumOptimizer requires a momentum value; 0.9 is a common default
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss, global_step=global_step)
# Compute accuracy
accuracy_hidden1 = tf.nn.relu(tf.matmul(validate_batch_features,
weights1) + biases1)
accuracy_hidden2 = tf.nn.relu(tf.matmul(accuracy_hidden1, weights2)
+ biases2)
accuracy_logits = tf.matmul(accuracy_hidden2, weights3) + biases3
validate_softmax = tf.nn.softmax(accuracy_logits)
validate_batch_labels = tf.to_int64(validate_batch_labels)
correct_prediction = tf.equal(
tf.argmax(validate_softmax, 1), validate_batch_labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Compute auc
validate_batch_labels = tf.cast(validate_batch_labels, tf.int32)
num_labels = 2
sparse_labels = tf.reshape(validate_batch_labels, [-1, 1])
derived_size = tf.shape(validate_batch_labels)[0]
indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
concated = tf.concat(axis=1, values=[indices, sparse_labels])
outshape = tf.stack([derived_size, num_labels])
new_validate_batch_labels = tf.sparse_to_dense(concated, outshape,
1.0, 0.0)
_, auc_op = tf.contrib.metrics.streaming_auc(
validate_softmax, new_validate_batch_labels)
# Define inference op
inference_features = tf.placeholder("float", [None, 9])
inference_hidden1 = tf.nn.relu(tf.matmul(inference_features,
weights1) + biases1)
inference_hidden2 = tf.nn.relu(tf.matmul(inference_hidden1,
weights2) + biases2)
inference_logits = tf.matmul(inference_hidden2, weights3) + biases3
inference_softmax = tf.nn.softmax(inference_logits)
inference_op = tf.argmax(inference_softmax, 1)
saver = tf.train.Saver()
steps_to_validate = FLAGS.steps_to_validate
init_op = tf.global_variables_initializer()
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('auc', auc_op)
summary_op = tf.summary.merge_all()
sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
logdir="./checkpoint/",
init_op=init_op,
summary_op=summary_op,
saver=saver,
global_step=global_step,
save_model_secs=60)
with sv.managed_session(server.target) as sess:
step = 0
while not sv.should_stop() and step < 1000000:
# Get coordinator and run queues to read data
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
try:
while not coord.should_stop():
# Run train op
_, loss_value, step = sess.run([train_op, loss,
global_step])
if step % steps_to_validate == 0:
accuracy_value, auc_value, summary_value = sess.run(
[accuracy, auc_op, summary_op])
print(
"Step: {}, loss: {}, accuracy: {}, auc: {}".format(
step, loss_value, accuracy_value,
auc_value))
except tf.errors.OutOfRangeError:
print("Done training after reading all data")
finally:
coord.request_stop()
# Wait for threads to exit
coord.join(threads)
if __name__ == "__main__":
tf.app.run()
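# Example launch commands (illustrative filename and host:port values, not from
# the original): one parameter server and two workers, matching the ps_hosts /
# worker_hosts flags defined above.
#
#   python cancer_classifier.py --ps_hosts=localhost:2221 \
#       --worker_hosts=localhost:2222,localhost:2223 --job_name=ps --task_index=0
#   python cancer_classifier.py --ps_hosts=localhost:2221 \
#       --worker_hosts=localhost:2222,localhost:2223 --job_name=worker --task_index=0
#   python cancer_classifier.py --ps_hosts=localhost:2221 \
#       --worker_hosts=localhost:2222,localhost:2223 --job_name=worker --task_index=1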
|
py | 1a3637b1fbdeaa02d9f378916f7eab39c6b06b8f | import svgutils.transform as sg
import geomutils
xmlnsmap = {'svg': 'http://www.w3.org/2000/svg'}
def _get_MP(skname, sk, mp_id):
'''
skname = the name of the sketch
sk = sketch object returned by load_sketch()
mp_id = the numeric ID of the MountingPoint to be returned
'''
mpstart = 'MP_' + skname
if mp_id:
mpstart += '_' + str(mp_id)
baseQuery = '//svg:line[starts-with(@id, "%s")'
endswithQuery = '"%s" = substring(@id, string-length(@id) - 1)]'
u = sk.root.xpath(baseQuery % mpstart + ' and ' + endswithQuery % '_u', namespaces=xmlnsmap)
r = sk.root.xpath(baseQuery % mpstart + ' and ' + endswithQuery % '_r', namespaces=xmlnsmap)
return (u, r)
def register_sketches(skname1, sk1, skname2, sk2, mp1=1, mp2=1):
def parse_line(elem):
return ((float(elem.get('x1')), float(elem.get('y1'))),
(float(elem.get('x2')), float(elem.get('y2'))))
print _get_MP(skname1, sk1, mp1)
# Get mounting points guide lines
up1 = parse_line(sk1.find_id('MP_' + skname1 + '_' + str(mp1) + '_u').root)
rg1 = parse_line(sk1.find_id('MP_' + skname1 + '_' + str(mp1) + '_r').root)
up2 = parse_line(sk2.find_id('MP_' + skname2 + '_' + str(mp2) + '_u').root)
rg2 = parse_line(sk2.find_id('MP_' + skname2 + '_' + str(mp2) + '_r').root)
# Compute scales to make the two figures match in size
fig1scale = geomutils.line_len(up1)
fig2scale = geomutils.line_len(up2)
midscale = (fig1scale + fig2scale) / 2. # The average of the two scales
fig1_normscale = fig1scale / midscale
fig2_normscale = fig2scale / midscale
print '--scales-->', fig1scale, fig2scale
print '--normscales-->', fig1_normscale, fig2_normscale
# 2D displacement of the second figure
displacement = (up1[0][0]*fig2_normscale - up2[0][0]*fig1_normscale,
up1[0][1]*fig2_normscale - up2[0][1]*fig1_normscale)
# Get the plot objects
plot1 = sk1.getroot()
plot2 = sk2.getroot()
plot1.moveto(0, 0, scale=fig2_normscale)
plot2.moveto(0 + displacement[0], 0 + displacement[1], scale=fig1_normscale)
# Rotation
plot1.rotate(-90 - geomutils.line_angle(up1), up1[0][0], up1[0][1])
plot2.rotate(-90 - geomutils.line_angle(up2), up2[0][0], up2[0][1])
# TODO: Flip if needed
# Create new SVG figure
fig = sg.SVGFigure("1500px", "1500px") # FIXME: hardcoded size
# Append plots and labels to figure
fig.append([plot1, plot2])
return fig
def load_sketch(figname):
'''
Loads SVG sketch file and returns an object
'''
return sg.fromfile(figname + '.svg')
if __name__ == '__main__':
figname1 = 'flying-thing'
figname2 = 'mpt2'
# Load test figures
fig1 = load_sketch(figname1)
fig2 = load_sketch(figname2)
# Match and combine the two figures
regfig = register_sketches(figname1, fig1, figname2, fig2)
# Save generated SVG file
regfig.save("fig_final.svg")
|
py | 1a3638da807b4b81a3b785be9cf07acfce57649e | import urllib
import requests
from appi.debugging.log_handling import setup_logger, close_log_handlers
class APIController:
def __init__(self, base_url, table_name, log_filename):
self.base_url = base_url
self.table_name = table_name
self.column_url = self.base_url + f"api/v1/resources/{self.table_name}/columns"
self.add_url = self.base_url + "add_animal"
self.filter_url = self.base_url + f"api/v1/resources/{self.table_name}"
self.delete_url = self.base_url + f"delete_animal"
self.log = setup_logger(log_filename)
self.columns = self.get_columns()
self.log.info(f"Available columns are {self.columns}")
def __del__(self):
close_log_handlers(self.log)
def get_columns(self):
columns, success = self.make_and_log_http_call(self.column_url, "Getting table columns")
return columns
def query_data(self, filter):
payload = urllib.parse.urlencode(filter)
data, success = self.make_and_log_http_call(self.filter_url, f"Getting data for {filter}", payload=payload)
return data
def add_data(self, data):
self.make_and_log_http_call(self.add_url, f"Adding data: {data}", json=False, payload=data)
def delete_data(self, name):
self.make_and_log_http_call(self.delete_url, f"Deleting data: {name}", json=False, payload=name)
def make_and_log_http_call(self, url, code_str, json=True, payload=None):
self.log.info("Calling: " + str(url))
try:
if payload:
response = requests.get(url, params=payload)
else:
response = requests.get(url)
self.log.info(code_str + " code: " + str(response.status_code))
self.log.debug(code_str + " text: " + response.text)
if json:
return response.json(), response.status_code == 200
else:
return response, response.status_code == 200
except Exception as e:
self.log.warning("Request failed")
self.log.debug(str(e))
return None, False
def main():
animals_controller = APIController("http://localhost/", "animals", "animals_controller.log")
data = {"name": "Bob", "animal_type": "Dog", "age": 1, "price": 30}
animals_controller.add_data(data)
data = {"name": "Lars", "animal_type": "Horse", "age": 2, "price": 10}
animals_controller.add_data(data)
data = {"name": "Helen", "animal_type": "Cat", "age": 3, "price": 20}
animals_controller.add_data(data)
data = {"name": "Max", "animal_type": "Fish", "age": 4, "price": 25}
animals_controller.add_data(data)
filter = {"price": {"gte": 20}}
print(animals_controller.query_data(filter))
filter = {"name": "Max", "price": {"gte": 20, "lt": 30}}
print(animals_controller.query_data(filter))
animals_controller.delete_data({"name": "Max"})
print(animals_controller.query_data(filter))
books_controller = APIController("http://localhost/", "books", "books_controller.log")
filter = {"title": "Ancillary Justice"}
print(books_controller.query_data(filter))
if __name__ == '__main__':
main()
|
py | 1a3639a4250294febd044dd18f9af834c01eb92d | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import optparse
import sys
import os
import logging
import urllib2
import json
import datetime
import time
AMS_HOSTNAME = 'localhost'
AMS_PORT = '6188'
AMS_APP_ID = None
HOSTS_FILE = None
METRICS_FILE = None
OUT_DIR = None
PRECISION = 'minutes'
START_TIME = None
END_TIME = None
def get_collector_uri(metricNames, hostname = None):
if hostname:
return 'http://{0}:{1}/ws/v1/timeline/metrics?metricNames={2}&hostname={3}&appId={4}&startTime={5}&endTime={6}&precision={7}'\
.format(AMS_HOSTNAME, AMS_PORT, metricNames, hostname, AMS_APP_ID,
START_TIME, END_TIME, PRECISION)
else:
return 'http://{0}:{1}/ws/v1/timeline/metrics?metricNames={2}&appId={3}&startTime={4}&endTime={5}&precision={6}'\
.format(AMS_HOSTNAME, AMS_PORT, metricNames, AMS_APP_ID, START_TIME,
END_TIME, PRECISION)
def get_metrics(collector_uri):
req = urllib2.Request(collector_uri)
data = None
try:
data = urllib2.urlopen(req)
except Exception as e:
logger.error('Error on metrics GET request: %s' % collector_uri)
logger.error(str(e))
# Validate json before dumping
json_data = None
if data:
try:
json_data = json.loads(data.read())
except Exception as e:
logger.warn('Error parsing json data returned from URI: %s' % collector_uri)
logger.debug(str(e))
return json_data
def write_metrics_to_file(metrics, host = None):
for metric in metrics:
uri = get_collector_uri(metric, host)
logger.info('Request URI: %s' % str(uri))
metrics_json = get_metrics(uri)
if metrics_json:
if host:
path = os.path.join(OUT_DIR, host, metric)
else:
path = os.path.join(OUT_DIR, metric)
logger.info('Writing metric file: %s' % path)
with open(path, 'w') as file:
file.write(json.dumps(metrics_json))
pass
def export_ams_metrics():
if not os.path.exists(METRICS_FILE):
logger.error('Metrics file is required.')
sys.exit(1)
logger.info('Reading metrics file.')
metrics = []
with open(METRICS_FILE, 'r') as file:
for line in file:
metrics.append(line.strip())
pass
logger.info('Reading hosts file.')
hosts = []
if HOSTS_FILE and os.path.exists(HOSTS_FILE):
with open(HOSTS_FILE, 'r') as file:
for line in file:
hosts.append(line.strip())
else:
logger.info('No hosts file found, aggregate metrics will be exported.')
logger.info('Creating output dir.')
os.makedirs(OUT_DIR)
if hosts:
for host in hosts:
os.makedirs(os.path.join(OUT_DIR, host)) # create host dir
write_metrics_to_file(metrics, host)
else:
write_metrics_to_file(metrics, None)
pass
def get_epoch(input):
  if not input:
    return -1
  elif (len(input) == 13):
    return int(input)
  elif (len(input) == 20):
    return int(time.mktime(datetime.datetime.strptime(input,'%Y-%m-%dT%H:%M:%SZ').timetuple())*1000)
  else:
    return -1
  pass
#
# Main.
#
def main():
parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.set_description('This python program is a Ambari thin client and '
'supports export of ambari metric data for an app '
'from Ambari Metrics Service to a output dir. '
'The metrics will be exported to a file with name of '
'the metric and in a directory with the name as the '
'hostname under the output dir.')
d = datetime.datetime.now()
time_suffix = '{0}-{1}-{2}-{3}-{4}-{5}'.format(d.year, d.month, d.day,
d.hour, d.minute, d.second)
print 'Time: %s' % time_suffix
logfile = os.path.join('/tmp', 'ambari_metrics_export.out')
output_dir = os.path.join('/tmp', 'ambari_metrics_export_' + time_suffix)
parser.add_option("-v", "--verbose", dest="verbose", action="store_false",
default=False, help="output verbosity.")
parser.add_option("-s", "--host", dest="server_hostname",
help="AMS host name.")
parser.add_option("-p", "--port", dest="server_port",
default="6188" ,help="AMS port. [default: 6188]")
parser.add_option("-a", "--app-id", dest="app_id",
help="AMS app id.")
parser.add_option("-f", "--host-file", dest="hosts_file",
help="Host file with hostnames to query. New line separated.")
parser.add_option("-m", "--metrics-file", dest="metrics_file",
help="Metrics file with metric names to query. New line separated.")
parser.add_option("-o", "--output-dir", dest="output_dir", default=output_dir,
help="Output dir. [default: %s]" % output_dir)
parser.add_option("-l", "--logfile", dest="log_file", default=logfile,
metavar='FILE', help="Log file. [default: %s]" % logfile)
parser.add_option("-r", "--precision", dest="precision",
default='minutes', help="AMS API precision, default = minutes.")
parser.add_option("-b", "--start_time", dest="start_time",
help="Start time in milliseconds since epoch or UTC timestamp in YYYY-MM-DDTHH:mm:ssZ format.")
parser.add_option("-e", "--end_time", dest="end_time",
help="End time in milliseconds since epoch or UTC timestamp in YYYY-MM-DDTHH:mm:ssZ format.")
(options, args) = parser.parse_args()
global AMS_HOSTNAME
AMS_HOSTNAME = options.server_hostname
global AMS_PORT
AMS_PORT = options.server_port
global AMS_APP_ID
AMS_APP_ID = options.app_id
global HOSTS_FILE
HOSTS_FILE = options.hosts_file
global METRICS_FILE
METRICS_FILE = options.metrics_file
global PRECISION
PRECISION = options.precision
if options.log_file:
logfile = options.log_file
global logger
logger = logging.getLogger('AmbariMetricsExport')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
filehandler = logging.FileHandler(logfile)
consolehandler = logging.StreamHandler()
filehandler.setFormatter(formatter)
consolehandler.setFormatter(formatter)
logger.addHandler(filehandler)
logger.addHandler(consolehandler)
# set verbose
if options.verbose:
#logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
else:
#logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.INFO)
if not options.metrics_file or not os.path.exists(options.metrics_file):
logger.warn('No valid metrics file path provided.')
logger.info('Aborting...')
sys.exit(1)
if options.output_dir and os.path.exists(options.output_dir):
logger.warn('Output directory {0} already exists.'.format(options.output_dir))
logger.info('Aborting...')
sys.exit(1)
if options.output_dir:
output_dir = options.output_dir
global OUT_DIR
OUT_DIR = output_dir
global START_TIME
START_TIME = get_epoch(options.start_time)
if START_TIME == -1:
logger.warn('No start time provided, or it is in the wrong format. Please '
'provide milliseconds since epoch or a value in YYYY-MM-DDTHH:mm:ssZ format')
logger.info('Aborting...')
sys.exit(1)
global END_TIME
END_TIME = get_epoch(options.end_time)
if END_TIME == -1:
logger.warn('No end time provided, or it is in the wrong format. Please '
'provide milliseconds since epoch or a value in YYYY-MM-DDTHH:mm:ssZ format')
logger.info('Aborting...')
sys.exit(1)
export_ams_metrics()
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, EOFError):
print("\nAborting ... Keyboard Interrupt.")
sys.exit(1)
|
py | 1a363aa2ad9b32be42b760ba12b36abc0793638c | import logging
from . import generic
from .elfreloc import ELFReloc
l = logging.getLogger('cle.backends.elf.relocations.ppc64')
# http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.pdf
arch = 'PPC64'
class R_PPC64_JMP_SLOT(ELFReloc):
def relocate(self, solist, bypass_compatibility=False):
if not self.resolve_symbol(solist, bypass_compatibility):
return False
if self.owner_obj.is_ppc64_abiv1:
# R_PPC64_JMP_SLOT
# http://osxr.org/glibc/source/sysdeps/powerpc/powerpc64/dl-machine.h?v=glibc-2.15#0405
# copy an entire function descriptor struct
addr = self.resolvedby.owner_obj.memory.read_addr_at(self.resolvedby.relative_addr)
toc = self.resolvedby.owner_obj.memory.read_addr_at(self.resolvedby.relative_addr + 8)
aux = self.resolvedby.owner_obj.memory.read_addr_at(self.resolvedby.relative_addr + 16)
self.owner_obj.memory.write_addr_at(self.relative_addr, addr)
self.owner_obj.memory.write_addr_at(self.relative_addr + 8, toc)
self.owner_obj.memory.write_addr_at(self.relative_addr + 16, aux)
else:
self.owner_obj.memory.write_addr_at(self.relative_addr, self.resolvedby.rebased_addr)
return True
class R_PPC64_RELATIVE(generic.GenericRelativeReloc):
pass
class R_PPC64_IRELATIVE(generic.GenericIRelativeReloc):
pass
class R_PPC64_ADDR64(generic.GenericAbsoluteAddendReloc):
pass
class R_PPC64_GLOB_DAT(generic.GenericJumpslotReloc):
pass
class R_PPC64_DTPMOD64(generic.GenericTLSModIdReloc):
pass
class R_PPC64_DTPREL64(generic.GenericTLSDoffsetReloc):
pass
class R_PPC64_TPREL64(generic.GenericTLSOffsetReloc):
pass
|
py | 1a363bcdc4ae45dc5bfd6903da216c0d885d28b9 | import numpy as np
from lazy import lazy
from .cec2013lsgo import CEC2013LSGO
class F7(CEC2013LSGO):
"""
7-nonseparable, 1-separable Shifted and Rotated Elliptic Function
"""
def __init__(
self,
*,
rng_seed: int = 42,
use_shuffle: bool = False,
verbose: int = 0
):
super(F7, self).__init__(
rng_seed=rng_seed,
use_shuffle=use_shuffle,
verbose=verbose,
)
@property
def genome_size(self) -> np.ndarray:
return 1_000
@lazy
def lower_bound(self) -> np.ndarray:
lower_bound = [-100] * self.genome_size
return np.array(lower_bound)
@lazy
def upper_bound(self) -> np.ndarray:
upper_bound = [100] * self.genome_size
return np.array(upper_bound)
def _evaluate(self, x: np.ndarray) -> np.ndarray:
out_of_bounds = self.check_bounds(x)
out_of_bounds = np.any(out_of_bounds, axis=1)
x = x - self.xopt
fitness = 0
ldim = 0
for i in range(len(self.s)):
f: np.ndarray
z = x[:, self.p[ldim:ldim + self.s[i]] - 1].T
ldim += self.s[i]
if self.s[i] == 25:
f = self.R25
elif self.s[i] == 50:
f = self.R50
elif self.s[i] == 100:
f = self.R100
f = f @ z
f = self._schwefel(f.T)
fitness += self.w[i] * f
fitness += self._sphere(x[:, self.p[ldim:] - 1])
fitness[out_of_bounds] = None
return fitness
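# Usage sketch (editorial addition): evaluates the benchmark on a small random
# population. It assumes the CEC2013LSGO base class loads its shift/rotation data
# (xopt, p, s, w, R25/R50/R100) during __init__, and calls the protected
# _evaluate hook directly because the public entry point is not shown here.
if __name__ == "__main__":
    benchmark = F7(rng_seed=0)
    population = np.random.uniform(benchmark.lower_bound, benchmark.upper_bound,
                                   size=(4, benchmark.genome_size))
    print(benchmark._evaluate(population))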
|
py | 1a363c4dd1f32706844590174fdbd402ac1b5c07 | #CODE3---First concatenating the required files into one based on the specfic attribute columns from SMPDB database and protein network---
#Python 3.6.5 |Anaconda, Inc.
import sys
import glob
import errno
import csv
path = '/home/16AT72P01/Excelra/SMPDB/smpdb_proteins/*.csv'
files = glob.glob(path)
with open("/home/16AT72P01/Excelra/SMPDB/output/metabolic_proteins.csv" ,'w') as csv_file:
writer = csv.writer(csv_file,quotechar='"', delimiter='\t', quoting=csv.QUOTE_ALL, skipinitialspace=True)
writer.writerow(["SMPDB_ID","PATHWAY_NAME","PATHWAY_LABEL","PROTEIN_NAME","GENE_NAME","LOCUS","UNIPROT_ID","GENEBANK_ID"])
for name in files:
try:
with open(name) as f1:
#reader = csv.reader(f1)
print(name)
reader = csv.DictReader(f1, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)
print(reader)
for row in reader:
print(row)
writer.writerow([row["SMPDB ID"],row["Pathway Name"],row["Pathway Subject"],row["Protein Name"],row["Gene Name"],row["Locus"],row["Uniprot ID"],row["GenBank ID"]]) #writer.writerow([row[0],row[1],row[2],row[3],row[4],row[8],row[6]])
f1.close()
except IOError as exc:
if exc.errno != errno.EISDIR: # Do not fail if a directory is found, just ignore it.
raise # Propagate other kinds of IOError.
csv_file.close()
|
py | 1a363c64542c467719e3000b80c262438ecc5c81 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import crypten
import torch
from ..util import ConfigBase
__all__ = [
"exp",
"log",
"reciprocal",
"inv_sqrt",
"sqrt",
"_eix",
"cossin",
"cos",
"sin",
"sigmoid",
"tanh",
"erf",
"softmax",
"log_softmax",
]
@dataclass
class ApproxConfig:
"""
A configuration object for use by the MPCTensor.
"""
# exponential function
exp_iterations: int = 8
# reciprocal configuration
reciprocal_method: str = "NR"
reciprocal_nr_iters: int = 10
reciprocal_log_iters: int = 1
reciprocal_all_pos: bool = False
reciprocal_initial: any = None
# sqrt configuration
sqrt_nr_iters: int = 3
sqrt_nr_initial: any = None
# sigmoid / tanh configuration
sigmoid_tanh_method: str = "reciprocal"
sigmoid_tanh_terms: int = 32
# log configuration
log_iterations: int = 2
log_exp_iterations: int = 8
log_order: int = 8
# trigonometry configuration
trig_iterations: int = 10
# error function configuration:
erf_iterations: int = 8
# Global config
config = ApproxConfig()
def set_config(new_config):
global config
config = new_config
class ConfigManager(ConfigBase):
r"""
Use this to temporarily change a value in the `approximations.config` object. The
following sets `config.exp_iterations` to `10` for one function
invocation and then sets it back to the previous value::
with ConfigManager("exp_iterations", 10):
tensor.exp()
"""
def __init__(self, *args):
super().__init__(config, *args)
# Iterative methods:
def exp(self):
"""Approximates the exponential function using a limit approximation:
.. math::
exp(x) = \lim_{n \\rightarrow \\infty} (1 + x / n) ^ n
Here we compute exp by choosing n = 2 ** d for some large d equal to
`iterations`. We then compute (1 + x / n) once and square `d` times.
Set the number of iterations for the limit approximation with
config.exp_iterations.
""" # noqa: W605
result = 1 + self.div(2 ** config.exp_iterations)
for _ in range(config.exp_iterations):
result = result.square()
return result
def log(self, input_in_01=False):
r"""
Approximates the natural logarithm using 8th order modified
Householder iterations. This approximation is accurate within 2% relative
error on [0.0001, 250].
Iterations are computed by: :math:`h = 1 - x * exp(-y_n)`
.. math::
y_{n+1} = y_n - \sum_k^{order}\frac{h^k}{k}
Args:
input_in_01 (bool) : Allows a user to indicate that the input is in the domain [0, 1],
causing the function optimize for this domain. This is useful for computing
log-probabilities for entropy functions.
We shift the domain of convergence by a constant :math:`a` using the following identity:
.. math::
\ln{u} = \ln {au} - \ln{a}
Since the domain of convergence for CrypTen's log() function is approximately [1e-4, 1e2],
we can set :math:`a=100`.
Configuration parameters:
iterations (int): number of Householder iterations for the approximation
exp_iterations (int): number of iterations for limit approximation of exp
order (int): number of polynomial terms used (order of Householder approx)
"""
if input_in_01:
return log(self.mul(100)) - 4.605170
# Initialization to a decent estimate (found by qualitative inspection):
# ln(x) = x/120 - 20exp(-2x - 1.0) + 3.0
iterations = config.log_iterations
exp_iterations = config.log_exp_iterations
order = config.log_order
term1 = self.div(120)
term2 = exp(self.mul(2).add(1.0).neg()).mul(20)
y = term1 - term2 + 3.0
# 8th order Householder iterations
with ConfigManager("exp_iterations", exp_iterations):
for _ in range(iterations):
h = 1 - self * exp(-y)
y -= h.polynomial([1 / (i + 1) for i in range(order)])
return y
def reciprocal(self, input_in_01=False):
"""
Args:
input_in_01 (bool) : Allows a user to indicate that the input is in the range [0, 1],
causing the function optimize for this range. This is useful for improving
the accuracy of functions on probabilities (e.g. entropy functions).
Methods:
'NR' : `Newton-Raphson`_ method computes the reciprocal using iterations
of :math:`x_{i+1} = (2x_i - self * x_i^2)` and uses
:math:`3*exp(1 - 2x) + 0.003` as an initial guess by default
'log' : Computes the reciprocal of the input from the observation that:
:math:`x^{-1} = exp(-log(x))`
Configuration params:
reciprocal_method (str): One of 'NR' or 'log'.
reciprocal_nr_iters (int): determines the number of Newton-Raphson iterations to run
for the `NR` method
reciprocal_log_iters (int): determines the number of Householder
iterations to run when computing logarithms for the `log` method
reciprocal_all_pos (bool): determines whether all elements of the
input are known to be positive, which optimizes the step of
computing the sign of the input.
reciprocal_initial (tensor): sets the initial value for the
Newton-Raphson method. By default, this will be set to :math:
`3*exp(-(x-.5)) + 0.003` as this allows the method to converge over
a fairly large domain
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Newton%27s_method
"""
if input_in_01:
with ConfigManager("reciprocal_all_pos", True):
rec = reciprocal(self.mul(64)).mul(64)
return rec
method = config.reciprocal_method
if not config.reciprocal_all_pos:
sgn = self.sign()
pos = sgn * self
with ConfigManager("reciprocal_all_pos", True):
return sgn * reciprocal(pos)
if method == "NR":
if config.reciprocal_initial is None:
# Initialization to a decent estimate (found by qualitative inspection):
# 1/x = 3exp(1 - 2x) + 0.003
result = 3 * (1 - 2 * self).exp() + 0.003
else:
result = config.reciprocal_initial
for _ in range(config.reciprocal_nr_iters):
if hasattr(result, "square"):
result += result - result.square().mul_(self)
else:
result = 2 * result - result * result * self
return result
elif method == "log":
with ConfigManager("log_iters", config.reciprocal_log_iters):
return exp(-log(self))
else:
raise ValueError(f"Invalid method {method} given for reciprocal function")
def inv_sqrt(self):
"""
Computes the inverse square root of the input using the Newton-Raphson method.
Configuration params:
sqrt_nr_iters (int): determines the number of Newton-Raphson iterations to run.
sqrt_nr_initial (tensor): sets the initial value for the Newton-Raphson iterations.
By default, this will be set to allow the method to converge over a
fairly large domain.
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Fast_inverse_square_root#Newton's_method
"""
# Initialize using decent approximation
if config.sqrt_nr_initial is None:
y = exp(self.div(2).add(0.2).neg()).mul(2.2).add(0.2)
y -= self.div(1024)
else:
y = config.sqrt_nr_initial
# Newton Raphson iterations for inverse square root
for _ in range(config.sqrt_nr_iters):
y = y.mul_(3 - self * y.square()).div_(2)
return y
def sqrt(self):
"""
Computes the square root of the input by computing its inverse square root using
the Newton-Raphson method and multiplying by the input.
Configuration params:
sqrt_nr_iters (int): determines the number of Newton-Raphson iterations to run
sqrt_initial (tensor): sets the initial value for the inverse square root
Newton-Raphson iterations. By default, this will be set to allow convergence
over a fairly large domain.
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Fast_inverse_square_root#Newton's_method
"""
return inv_sqrt(self).mul_(self)
def _eix(self):
"""Computes e^(i * self) where i is the imaginary unit.
Returns (Re{e^(i * self)}, Im{e^(i * self)} = cos(self), sin(self)
"""
iterations = config.trig_iterations
re = 1
im = self.div(2 ** iterations)
# First iteration uses knowledge that `re` is public and = 1
re -= im.square()
im *= 2
# Compute (a + bi)^2 -> (a^2 - b^2) + (2ab)i `iterations` times
for _ in range(iterations - 1):
a2 = re.square()
b2 = im.square()
im = im.mul_(re)
im._tensor *= 2
re = a2 - b2
return re, im
def cossin(self):
"""Computes cosine and sine of input via exp(i * x).
Args:
iterations (int): for approximating exp(i * x)
"""
return self._eix()
def cos(self):
"""Computes the cosine of the input using cos(x) = Re{exp(i * x)}
Args:
iterations (int): for approximating exp(i * x)
"""
return cossin(self)[0]
def sin(self):
"""Computes the sine of the input using sin(x) = Im{exp(i * x)}
Args:
iterations (int): for approximating exp(i * x)
"""
return cossin(self)[1]
# Logistic Functions
def sigmoid(self):
"""Computes the sigmoid function using the following definition
.. math::
\sigma(x) = (1 + e^{-x})^{-1}
If a valid method is given, this function will compute sigmoid
using that method:
"chebyshev" - computes tanh via Chebyshev approximation with
truncation and uses the identity:
.. math::
\sigma(x) = \frac{1}{2}tanh(\frac{x}{2}) + \frac{1}{2}
"reciprocal" - computes sigmoid using :math:`1 + e^{-x}` and computing
the reciprocal
""" # noqa: W605
method = config.sigmoid_tanh_method
if method == "chebyshev":
tanh_approx = tanh(self.div(2))
return tanh_approx.div(2) + 0.5
elif method == "reciprocal":
ltz = self._ltz()
sign = 1 - 2 * ltz
pos_input = self.mul(sign)
denominator = pos_input.neg().exp().add(1)
with ConfigManager(
"exp_iterations",
9,
"reciprocal_nr_iters",
3,
"reciprocal_all_pos",
True,
"reciprocal_initial",
0.75,
):
pos_output = denominator.reciprocal()
result = pos_output.where(1 - ltz, 1 - pos_output)
# TODO: Support addition with different encoder scales
# result = pos_output + ltz - 2 * pos_output * ltz
return result
else:
raise ValueError(f"Unrecognized method {method} for sigmoid")
def tanh(self):
r"""Computes the hyperbolic tangent function using the identity
.. math::
tanh(x) = 2\sigma(2x) - 1
If a valid method is given, this function will compute tanh using that method:
"chebyshev" - computes tanh via Chebyshev approximation with truncation.
.. math::
tanh(x) = \sum_{j=1}^terms c_{2j - 1} P_{2j - 1} (x / maxval)
where c_i is the ith Chebyshev series coefficient and P_i is ith polynomial.
The approximation is truncated to +/-1 outside [-1, 1].
Args:
terms (int): highest degree of Chebyshev polynomials.
Must be even and at least 6.
"""
method = config.sigmoid_tanh_method
terms = config.sigmoid_tanh_terms
if method == "reciprocal":
return self.mul(2).sigmoid().mul(2).sub(1)
elif method == "chebyshev":
coeffs = crypten.common.util.chebyshev_series(torch.tanh, 1, terms)[1::2]
tanh_polys = _chebyshev_polynomials(self, terms)
tanh_polys_flipped = (
tanh_polys.unsqueeze(dim=-1).transpose(0, -1).squeeze(dim=0)
)
out = tanh_polys_flipped.matmul(coeffs)
# truncate outside [-maxval, maxval]
return out.hardtanh()
else:
raise ValueError(f"Unrecognized method {method} for tanh")
def _chebyshev_polynomials(self, terms):
r"""Evaluates odd degree Chebyshev polynomials at x
Chebyshev Polynomials of the first kind are defined as
.. math::
P_0(x) = 1, P_1(x) = x, P_n(x) = 2 P_{n - 1}(x) - P_{n-2}(x)
Args:
self (MPCTensor): input at which polynomials are evaluated
terms (int): highest degree of Chebyshev polynomials.
Must be even and at least 6.
Returns:
MPCTensor of polynomials evaluated at self of shape `(terms, *self)`
"""
if terms % 2 != 0 or terms < 6:
raise ValueError("Chebyshev terms must be even and >= 6")
polynomials = [self.clone()]
y = 4 * self.square() - 2
z = y - 1
polynomials.append(z.mul(self))
for k in range(2, terms // 2):
next_polynomial = y * polynomials[k - 1] - polynomials[k - 2]
polynomials.append(next_polynomial)
return crypten.stack(polynomials)
def erf(tensor):
"""
Approximates the error function of the input tensor using a Taylor approximation.
"""
output = tensor.clone()
for n in range(1, config.erf_iterations + 1):
multiplier = ((-1) ** n) / (math.factorial(n) * (2 * n + 1))
output = output.add(tensor.pos_pow(2 * n + 1).mul(multiplier))
return output.mul(2.0 / math.sqrt(math.pi))
# NOTE: This approximation is not unstable for large tensor values.
def softmax(self, dim, **kwargs):
"""Compute the softmax of a tensor's elements along a given dimension"""
# 0-d case
if self.dim() == 0:
assert dim == 0, "Improper dim argument"
return self.new(torch.ones_like((self.data)))
if self.size(dim) == 1:
return self.new(torch.ones_like(self.data))
maximum_value = self.max(dim, keepdim=True)[0]
logits = self - maximum_value
numerator = logits.exp()
with ConfigManager("reciprocal_all_pos", True):
inv_denominator = numerator.sum(dim, keepdim=True).reciprocal()
return numerator * inv_denominator
def log_softmax(self, dim, **kwargs):
"""Applies a softmax followed by a logarithm.
While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower, and numerically unstable. This function
uses an alternative formulation to compute the output and gradient correctly.
"""
# 0-d case
if self.dim() == 0:
assert dim == 0, "Improper dim argument"
return self.new(torch.zeros((), device=self.device))
if self.size(dim) == 1:
return self.new(torch.zeros_like(self.data))
maximum_value = self.max(dim, keepdim=True)[0]
logits = self - maximum_value
normalize_term = exp(logits).sum(dim, keepdim=True)
result = logits - normalize_term.log()
return result
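# Editorial note (assumed call path, not shown in this file): these helpers are
# exposed as MPCTensor methods once CrypTen is initialised, e.g.
#   crypten.init()
#   t = crypten.cryptensor(torch.tensor([0.5, 1.0, 2.0]))
#   with ConfigManager("exp_iterations", 10):
#       print(t.exp().get_plain_text())
# where the iteration override only applies inside the context manager.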
|
py | 1a363c7abb4d9d8be3ac2053b11df223859f6878 | import kivy
kivy.require('1.7.2')
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.scrollview import ScrollView
from kivy.uix.dropdown import DropDown
from kivy.uix.screenmanager import Screen
class templateScreen(Screen):
subject_dict = {}
def __init__(self, **kwargs):
super(templateScreen, self).__init__(**kwargs)
self.monitor_button_dict = dict()
# display space
self.layout_line = GridLayout(cols = 3, spacing = 20, padding = 20, size_hint_y = None)
self.layout_line.bind(minimum_height=self.layout_line.setter('height'))
self.scroll = ScrollView(size_hint=(1, 1.5))
self.scroll.add_widget(self.layout_line)
self.work_space = BoxLayout(orientation='vertical')
self.work_space.add_widget(self.scroll)
# button menu
self.button_menu = BoxLayout(orientation='horizontal', size_hint_y = .2, spacing=10, padding=10)
# drop down list for monitor type set up
self.dropdown_monitor_type = DropDown()
self.mainbutton_monitor_type = Button(text='Monitor type')
self.mainbutton_monitor_type.bind(on_release=self.dropdown_monitor_type.open)
self.dropdown_monitor_type.bind(on_select=lambda instance, x: setattr(self.mainbutton_monitor_type, 'text', x))
# drop down list for location selection set up
self.dropdown = DropDown()
self.mainbutton = Button(text="location list insert")
self.mainbutton.bind(on_release=self.dropdown.open)
self.dropdown.bind(on_select=lambda instance, x: setattr(self.mainbutton, 'text', x))
# add button
self.add_btn = Button(text="Add", background_color = (0, 0, 255, 1))
self.add_btn.bind(on_press = self.add_location)
# navigating button
self.navigate_btn = Button(text="Switch to [destination]", background_color = (0, 0, 1, 255))
self.navigate_btn.bind(on_press= self.navigation)
# push all buttons into button menu
self.button_menu.add_widget(self.add_btn)
self.button_menu.add_widget(self.mainbutton_monitor_type)
self.button_menu.add_widget(self.mainbutton)
self.button_menu.add_widget(self.navigate_btn)
# add button menu into work space layout
self.work_space.add_widget(self.button_menu)
# add work space layout to the screen
self.add_widget(self.work_space)
'''
    Override in a child class to choose the destination to navigate to.
'''
def navigation(self, *args):
pass
def add_location(self, *args):
pass
def interval_update(self, *args):
pass
def button_dict_storage(self, btn_object, location):
if location not in self.monitor_button_dict.keys():
self.monitor_button_dict[location] = []
self.monitor_button_dict[location].append(btn_object)
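# Subclassing sketch (editorial addition): concrete screens are expected to
# override navigation()/add_location(). The screen name 'home' and the attached
# ScreenManager are illustrative assumptions only.
class exampleScreen(templateScreen):
    def navigation(self, *args):
        # switch to a hypothetical 'home' screen when a ScreenManager is attached
        if self.manager is not None:
            self.manager.current = 'home'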
|
py | 1a363f64b046348b49d289fc99df59f98e8e1208 | from unittest2 import TestCase
class BaseTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
|
py | 1a363ff614fb13d3605f220152bf8a1675b85c9c | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import subprocess
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def is_special(filename):
return True if re.search(r'__\w+__', filename) else False
def sp_files_list(dir):
filenames = os.listdir(dir)
result = []
for filename in filenames:
if is_special(filename):
result.append(os.path.abspath(os.path.join(dir, filename)))
return result
def to_dir(orig, dst):
# copy all the special files located in the directories in "orig"
#to the "dst" directory
if not os.path.exists(dst): os.makedirs(dst)
for dir in orig:
filenames = os.listdir(dir)
for filename in filenames:
if is_special(filename):
shutil.copy(os.path.abspath(os.path.join(dir, filename)), dst)
def to_zip(zip_file, args):
files = []
for arg in args:
files += sp_files_list(arg)
cmd = '7z a ' + zip_file + ' ' + ' '.join(files)
print('Command I\'m about to do: ' + cmd)
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as exc:
print("Error: ", exc.returncode, exc.output)
else:
print(output)
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print("usage: [--todir dir][--tozip zipfile] dir [dir ...]")
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
to_dir(args, todir)
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
to_zip(tozip, args)
if len(args) == 0:
print("error: must specify one or more dirs")
sys.exit(1)
if not todir and not tozip:
for arg in args:
print('\n'.join(sp_files_list(arg)))
if __name__ == "__main__":
main()
|
py | 1a3640df6aa0522e17657d042ab00a8f3142181d | layer_info = \
{1: {'B': 1, 'K': 64, 'C': 3, 'OY': 112, 'OX': 112, 'FY': 7, 'FX': 7, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
2: {'B': 1, 'K': 64, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
3: {'B': 1, 'K': 64, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
4: {'B': 1, 'K': 256, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
5: {'B': 1, 'K': 16, 'C': 256, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
6: {'B': 1, 'K': 256, 'C': 16, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
7: {'B': 1, 'K': 256, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
8: {'B': 1, 'K': 64, 'C': 256, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
9: {'B': 1, 'K': 64, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
10: {'B': 1, 'K': 256, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
11: {'B': 1, 'K': 16, 'C': 256, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
12: {'B': 1, 'K': 256, 'C': 16, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
13: {'B': 1, 'K': 64, 'C': 256, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
14: {'B': 1, 'K': 64, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
15: {'B': 1, 'K': 256, 'C': 64, 'OY': 56, 'OX': 56, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
16: {'B': 1, 'K': 16, 'C': 256, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
17: {'B': 1, 'K': 256, 'C': 16, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
18: {'B': 1, 'K': 128, 'C': 256, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
19: {'B': 1, 'K': 128, 'C': 128, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
20: {'B': 1, 'K': 512, 'C': 128, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
21: {'B': 1, 'K': 32, 'C': 512, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
22: {'B': 1, 'K': 512, 'C': 32, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
23: {'B': 1, 'K': 512, 'C': 256, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
24: {'B': 1, 'K': 128, 'C': 512, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
25: {'B': 1, 'K': 128, 'C': 128, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
26: {'B': 1, 'K': 512, 'C': 128, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
27: {'B': 1, 'K': 32, 'C': 512, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
28: {'B': 1, 'K': 512, 'C': 32, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
29: {'B': 1, 'K': 128, 'C': 512, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
30: {'B': 1, 'K': 128, 'C': 128, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
31: {'B': 1, 'K': 512, 'C': 128, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
32: {'B': 1, 'K': 32, 'C': 512, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
33: {'B': 1, 'K': 512, 'C': 32, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
34: {'B': 1, 'K': 128, 'C': 512, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
35: {'B': 1, 'K': 128, 'C': 128, 'OY': 28, 'OX': 28, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
36: {'B': 1, 'K': 512, 'C': 128, 'OY': 28, 'OX': 28, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
37: {'B': 1, 'K': 32, 'C': 512, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
38: {'B': 1, 'K': 512, 'C': 32, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
39: {'B': 1, 'K': 256, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
40: {'B': 1, 'K': 256, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
41: {'B': 1, 'K': 1024, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
42: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
43: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
44: {'B': 1, 'K': 1024, 'C': 512, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
45: {'B': 1, 'K': 256, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
46: {'B': 1, 'K': 256, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
47: {'B': 1, 'K': 1024, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
48: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
49: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
50: {'B': 1, 'K': 256, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
51: {'B': 1, 'K': 256, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
52: {'B': 1, 'K': 1024, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
53: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
54: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
55: {'B': 1, 'K': 256, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
56: {'B': 1, 'K': 256, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
57: {'B': 1, 'K': 1024, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
58: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
59: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
60: {'B': 1, 'K': 256, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
61: {'B': 1, 'K': 256, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
62: {'B': 1, 'K': 1024, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
63: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
64: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
65: {'B': 1, 'K': 256, 'C': 1024, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
66: {'B': 1, 'K': 256, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
67: {'B': 1, 'K': 1024, 'C': 256, 'OY': 14, 'OX': 14, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
68: {'B': 1, 'K': 64, 'C': 1024, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
69: {'B': 1, 'K': 1024, 'C': 64, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
70: {'B': 1, 'K': 512, 'C': 1024, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
71: {'B': 1, 'K': 512, 'C': 512, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
72: {'B': 1, 'K': 2048, 'C': 512, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
73: {'B': 1, 'K': 128, 'C': 2048, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
74: {'B': 1, 'K': 2048, 'C': 128, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
75: {'B': 1, 'K': 2048, 'C': 1024, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 2, 'SX': 2, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
76: {'B': 1, 'K': 512, 'C': 2048, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
77: {'B': 1, 'K': 512, 'C': 512, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
78: {'B': 1, 'K': 2048, 'C': 512, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
79: {'B': 1, 'K': 128, 'C': 2048, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
80: {'B': 1, 'K': 2048, 'C': 128, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
81: {'B': 1, 'K': 512, 'C': 2048, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
82: {'B': 1, 'K': 512, 'C': 512, 'OY': 7, 'OX': 7, 'FY': 3, 'FX': 3, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
83: {'B': 1, 'K': 2048, 'C': 512, 'OY': 7, 'OX': 7, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
84: {'B': 1, 'K': 128, 'C': 2048, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
85: {'B': 1, 'K': 2048, 'C': 128, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1},
86: {'B': 1, 'K': 1000, 'C': 2048, 'OY': 1, 'OX': 1, 'FY': 1, 'FX': 1, 'SY': 1, 'SX': 1, 'SFY': 1, 'SFX': 1, 'PY': 0, 'PX': 0, 'G': 1}}
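# Illustrative helper (editorial addition): the keys follow the usual loop-dimension
# convention (B=batch, K=output channels, C=input channels, OY/OX=output size,
# FY/FX=filter size, G=groups); treating G as a channel-grouping divisor is an
# assumption. Under that reading, the MAC count of a layer is the product below.
def layer_macs(layer):
    return (layer['B'] * layer['K'] * layer['C'] * layer['OY'] * layer['OX']
            * layer['FY'] * layer['FX']) // layer['G']
if __name__ == "__main__":
    print(sum(layer_macs(l) for l in layer_info.values()))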
|
py | 1a3641440c5b42c33ec76d097bd623d0a93cb2a0 | import asyncio
import logging
import os
from watchdog.events import FileModifiedEvent, PatternMatchingEventHandler
from watchdog.observers import Observer
from watchdog.utils.patterns import match_any_paths
class WatcherHandler(PatternMatchingEventHandler):
"""Watcher class to observe changes in all specified files in the folder"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.observed = {}
def match_file(self, path):
"""Check if the path matches the patterns and folder"""
return match_any_paths(
[path],
included_patterns=self.patterns,
excluded_patterns=self.ignore_patterns,
case_sensitive=self.case_sensitive,
)
def get_size(self, path):
return self.observed.get(path, 0)
def set_size(self, path, size):
self.observed[path] = size
def read_initial_size(self, path):
"""Read the initial size of the file to not send the entire file on start"""
if os.path.isfile(path):
if self.match_file(path):
self.observed[path] = os.path.getsize(path)
return
for dirname, _, files in os.walk(path):
for file in files:
path = os.path.join(dirname, file)
if self.match_file(path):
self.observed[path] = os.path.getsize(path)
def on_new_line(self, path, line):
"""Send the line to the logging"""
logging.getLogger(path).info(line)
def on_modified(self, event):
"""React on modified files and append the new lines"""
if not isinstance(event, FileModifiedEvent):
return
size = os.path.getsize(event.src_path)
# Get the already observed lines
current = self.get_size(event.src_path)
if current >= size:
self.set_size(event.src_path, size)
return
# Open the file and seek to the last position
with open(event.src_path) as fp:
fp.seek(current)
# Read line by line and only use full lines
for line in fp:
stripped = line.strip()
if line.endswith("\n") and stripped:
current += len(line)
self.on_new_line(event.src_path, stripped)
# Update the position
self.set_size(event.src_path, current)
async def watch(path, **kwargs):
"""Watch on files of in a directory and log new lines"""
handler = WatcherHandler(**kwargs)
handler.read_initial_size(path)
observer = Observer()
observer.schedule(handler, path=path, recursive=True)
observer.start()
try:
while observer.is_alive():
await asyncio.sleep(0.1)
finally:
observer.stop()
observer.join()
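# Usage sketch (editorial addition): tails *.log files under ./logs and prints new
# lines through the logging module. The directory and pattern are assumptions; the
# keyword arguments are forwarded to PatternMatchingEventHandler by WatcherHandler.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format="%(name)s: %(message)s")
    asyncio.run(watch("./logs", patterns=["*.log"]))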
|
py | 1a36419f07fd906ba16817c64a31393022915054 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) 2016, Tomoyuki Sakurai <[email protected]>
#
# This file is NOT part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import time
import os
import hashlib
import tempfile
DOCUMENTATION = '''
---
module: logrotate
short_description: Manage config files for logrotate
description:
  - Creates a config file for I(logrotate)
version_added: "1.0"
options:
name:
description:
- Unique name of the config
required: true
default: null
files:
description:
- An array of path to files the I(logrotate) program to rotate
required: true
default: null
state:
description:
- The state of the logrotate config
required: true
default: null
choices: [ "present", "absent" ]
frequency:
description:
- rotate frequency
required: false
choices: [ "daily", "weekly", "monthly", "yearly" ]
default: "daily
rotate:
description:
- number of times before being removed
required: false
default: 30
files:
description:
- an array of paths to files to rotate
required: true
default: null
compress:
description:
- compress the rotated file if true
required: false
choices: [ "yes", "no" ]
default: true
compresscmd:
description:
- command to use to compress log files
required: false
default: False
uncompresscmd:
description:
- command to use to uncompress log files
required: false
default: False
  compressext:
description:
- extension to use on compressed logfiles, if compression is enabled
required: false
default: False
delaycompress:
description:
- delay compress
required: false
choices: [ "yes", "no" ]
default: true
copytruncate:
description:
- Truncate the original log file to zero size in place after creating a copy, instead of moving the old log file and optionally creating a new one.
required: false
choices: [ "yes", "no" ]
default: false
missingok:
description:
- proceed without a warning if the file to rotate is missing
required: false
choices: [ "yes", "no" ]
default: true
sharedscripts:
description:
- postrotate commands for multiple files are run only once
required: false
choices: [ "yes", "no" ]
default: false
notifempty:
description:
- do not rotate the log if it is empty
required: false
choices: [ "yes", "no" ]
default: no
postrotate:
description:
- an array of commands to run in postrotate
required: false
default: null
config_dir:
description:
- base directory of config files
required: false
default: /etc/logrotate.d
create:
description:
- Immediately after rotation (before the postrotate script is run) the log file is created
required: false
default: False
nocreate:
description:
- disable 'create' option
required: false
default: False
su:
description:
- Rotate log files set under this user and group instead of using default user/group
required: false
default: False
maxsize:
description:
- Log files are rotated when they grow bigger than size bytes even before the additionally specified time interval
required: false
default: False
minsize:
description:
- Log files are rotated when they grow bigger than size bytes, but not before the additionally specified time interval
required: false
default: False
size:
description:
- Log files are rotated only if they grow bigger then size bytes
required: false
default: False
requirements: [ ]
author: "Tomoyuki Sakurai <[email protected]>"
'''
EXAMPLES = '''
# lotate /var/log/messages and maillog daily, keep 30 files and restart syslog only once
- logrotate: frequency="daily", rotate="30", files=[ "/var/log/messages", "/bar/log/maillog" ] postrotate="kill -HUP `cat /var/run/syslog.pid`" sharedscripts=yes
'''
def validate_config(module):
"""Validate a file given with logrotate -d file"""
name = module.params.get('name')
contents = generate_config(module)
fd, temppath = tempfile.mkstemp(prefix='ansible-logrotate')
fh = os.fdopen(fd, 'w')
fh.write(contents)
fh.close()
LOGROTATE = module.get_bin_path('logrotate', True)
# read not only the file to validate but the default configuration because
# some defaults are needed to validate, notably `su` directive
default_config_path = get_default_config_path(module)
rc, out, err = module.run_command('%s -d %s %s' % (LOGROTATE, default_config_path, temppath), check_rc=True)
os.unlink(temppath)
if rc != 0:
module.fail_json(msg='failed to validate config for: %s' % (name), stdout=out, stderr=err)
def get_default_config_path(module):
"""Look for the default configuration and return the first one found"""
locations = [
# Linux
'/etc/logrotate.conf',
# FreeBSD
'/usr/local/etc/logrotate.conf'
]
found = ''
for path in locations:
if os.path.exists(path):
found = path
break
if not found:
module.fail_json(msg='cannot find logrotate.conf in default locations')
return found
def get_config_path(module):
return os.path.join(module.params.get('config_dir'), module.params.get('name'))
def create_config(module):
with open(get_config_path(module), 'w') as f:
f.write(generate_config(module))
def generate_config(module):
files = "\n".join(module.params.get('files'))
options = []
if module.params.get('compress'):
options += [ 'compress' ]
if module.params.get('compresscmd'):
options += [ 'compresscmd %s' % module.params.get('compresscmd') ]
if module.params.get('uncompresscmd'):
options += [ 'uncompresscmd %s' % module.params.get('uncompresscmd') ]
if module.params.get('compressext'):
options += [ 'compressext %s' % module.params.get('compressext') ]
if module.params.get('delaycompress'):
options += [ 'delaycompress' ]
if module.params.get('missingok'):
options += [ 'missingok' ]
if module.params.get('notifempty'):
options += [ 'notifempty' ]
if module.params.get('copytruncate'):
options += [ 'copytruncate' ]
if module.params.get('create'):
options += [ 'create %s' % module.params.get('create') ]
if module.params.get('nocreate'):
options += [ 'nocreate' ]
if module.params.get('su'):
options += [ 'su %s' % module.params.get('su') ]
if module.params.get('maxsize'):
options += [ 'maxsize %s' % module.params.get('maxsize') ]
if module.params.get('minsize'):
options += [ 'minsize %s' % module.params.get('minsize') ]
if module.params.get('size'):
options += [ 'size %s' % module.params.get('size') ]
options += [ '%s' % module.params.get('frequency') ]
options += [ 'rotate %s' % module.params.get('rotate') ]
if module.params.get('postrotate'):
if module.params.get('sharedscripts'):
options += [ 'sharedscripts' ]
options += [ 'postrotate' ]
options += map(lambda x: " %s" % x, module.params.get('postrotate'))
options += [ 'endscript' ]
TEMPLATE = """\
# Generated by ansible logrotate module
{files_text}
{{
{option_text}
}}
"""
return TEMPLATE.format(files_text=files, option_text='\n '.join(options))
def is_identical(a, b):
a_hash = hashlib.sha1(a.encode('utf-8')).hexdigest()
b_hash = hashlib.sha1(b.encode('utf-8')).hexdigest()
return a_hash == b_hash
def create_if_absent(module):
# XXX disable validation. recent logrotate fails when duplicate log entry
# for a log file is found.
# validate_config(module)
path = get_config_path(module)
if os.path.isfile(path):
data = None
with open(path) as f:
data = f.read()
if is_identical(data, generate_config(module)):
module.exit_json(changed=False, result="Success")
else:
create_config(module)
module.exit_json(changed=True, result="Created")
else:
create_config(module)
module.exit_json(changed=True, result="Created")
def remove_if_present(module):
path = get_config_path(module)
if os.path.isfile(path):
os.remove(path)
module.exit_json(changed=True, result="Removed")
else:
module.exit_json(changed=False, result="Success")
def main():
arg_spec = dict(
name = dict(required=True),
files = dict(required=True, type='list'),
state = dict(required=True, choices=['present', 'absent']),
frequency = dict(required=False, default='daily', choices=['daily', 'weekly', 'monthly', 'yearly']),
rotate = dict(required=False, default=30, type='int'),
compress = dict(required=False, default='yes', type='bool'),
compresscmd = dict(required=False),
uncompresscmd = dict(required=False),
compressext = dict(required=False),
copytruncate = dict(required=False, default='no', type='bool'),
delaycompress = dict(required=False, default='yes', type='bool'),
missingok = dict(required=False, default='yes', type='bool'),
sharedscripts = dict(required=False, default='yes', type='bool'),
notifempty = dict(required=False, default='no', type='bool'),
postrotate = dict(required=False, type='list'),
config_dir = dict(required=False, default='/etc/logrotate.d'),
create = dict(required=False),
nocreate = dict(required=False, type='bool'),
su = dict(required=False),
maxsize = dict(required=False),
minsize = dict(required=False),
size = dict(required=False)
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=False)
if module.check_mode:
module.exit_json(changed=True)
else:
if module.params.get('state') == 'present':
create_if_absent(module)
elif module.params.get('state') == 'absent':
remove_if_present(module)
else:
            module.fail_json(msg='Unknown state: %s' % module.params.get('state'))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
py | 1a3642d7e1e6cf6b7554ba23f3a325f559a09436 | """
Author: Ce Li
Tool for generator
"""
import copy
import math
import numpy as np
from tensorflow.keras import utils as np_utils
EPSILON = 1e-7
class Generator(np_utils.Sequence):
def __init__(self, x, x_authors, y, b_size, max_papers, max_seq, max_authors):
self.x, self.x_authors, self.y = x, x_authors, y
self.batch_size = b_size
self.max_papers = max_papers
self.max_seq = max_seq
self.max_authors = max_authors
self.author_emb_dim = 128
self.paper_emb_dim = 256
def __len__(self):
return math.ceil(len(self.x)/self.batch_size) # ceil or floor
def __getitem__(self, idx):
b_x = copy.deepcopy(
self.x[idx*self.batch_size:(idx+1)*self.batch_size])
b_x_authors = copy.deepcopy(
self.x_authors[idx * self.batch_size:(idx + 1) * self.batch_size])
b_y = copy.deepcopy(self.y[idx*self.batch_size:(idx+1)*self.batch_size])
for temp in b_x_authors:
for tem in temp:
for te in tem:
while len(te) < self.max_authors:
te.append(np.zeros(self.author_emb_dim))
while len(tem) < self.max_seq:
tem.append(np.zeros(shape=(self.max_authors, self.author_emb_dim)))
while len(temp) < self.max_papers:
temp.append(np.zeros(shape=(self.max_seq, self.max_authors, self.author_emb_dim)))
b_x_authors = np.array(b_x_authors)
for temp in b_x:
for tem in temp:
while len(tem) < self.max_seq:
tem.append(np.zeros(tem[0].shape))
while len(temp) < self.max_papers:
temp.append(np.zeros(shape=(self.max_seq, self.paper_emb_dim)))
b_x = np.array(b_x)
return (b_x, b_x_authors), np.array(b_y)
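# Illustrative usage sketch (assumption; ``x``, ``x_authors``, ``y`` and ``model``
# are hypothetical objects prepared elsewhere):
#
#   train_gen = Generator(x, x_authors, y, b_size=32,
#                         max_papers=10, max_seq=5, max_authors=8)
#   model.fit(train_gen, epochs=10)  # Keras consumes the Sequence batch by batch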
|
py | 1a36431ca48835cd3dcbd6fdd8f18ffc3e7e4096 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Reference circuits used by the tests."""
from qiskit.circuit import QuantumCircuit, QuantumRegister, ClassicalRegister
class ReferenceCircuits:
"""Container for reference circuits used by the tests."""
@staticmethod
def bell():
"""Return a Bell circuit."""
qr = QuantumRegister(2, name='qr')
cr = ClassicalRegister(2, name='qc')
qc = QuantumCircuit(qr, cr, name='bell')
qc.h(qr[0])
qc.cx(qr[0], qr[1])
qc.measure(qr, cr)
return qc
@staticmethod
def bell_no_measure():
"""Return a Bell circuit."""
qr = QuantumRegister(2, name='qr')
qc = QuantumCircuit(qr, name='bell_no_measure')
qc.h(qr[0])
qc.cx(qr[0], qr[1])
return qc
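# Illustrative usage sketch (not part of the original file; assumes a local
# qasm simulator backend is available):
#
#   from qiskit import BasicAer, execute
#   counts = execute(ReferenceCircuits.bell(),
#                    BasicAer.get_backend('qasm_simulator')).result().get_counts()
#   # counts are expected to be split between '00' and '11' for the Bell state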
|
py | 1a364372584a607d7af68485a384ead0157f3604 | from argparse import Namespace
import asyncio
import logging
import signal
import sys
from typing import Type
from evm.chains.mainnet import (
MAINNET_NETWORK_ID,
)
from evm.chains.ropsten import (
ROPSTEN_NETWORK_ID,
)
from evm.db.backends.base import BaseDB
from evm.db.backends.level import LevelDB
from p2p.service import BaseService
from trinity.exceptions import (
AmbigiousFileSystem,
MissingPath,
)
from trinity.chains import (
initialize_data_dir,
is_data_dir_initialized,
serve_chaindb,
)
from trinity.console import (
console,
)
from trinity.cli_parser import (
parser,
)
from trinity.config import (
ChainConfig,
)
from trinity.extensibility import (
PluginManager,
)
from trinity.extensibility.events import (
TrinityStartupEvent
)
from trinity.plugins.registry import (
ENABLED_PLUGINS
)
from trinity.utils.ipc import (
wait_for_ipc,
kill_process_gracefully,
)
from trinity.utils.logging import (
setup_trinity_stdout_logging,
setup_trinity_file_and_queue_logging,
with_queued_logging,
)
from trinity.utils.mp import (
ctx,
)
from trinity.utils.profiling import (
setup_cprofiler,
)
from trinity.utils.version import (
construct_trinity_client_identifier,
)
PRECONFIGURED_NETWORKS = {MAINNET_NETWORK_ID, ROPSTEN_NETWORK_ID}
TRINITY_HEADER = (
"\n"
" ______ _ _ __ \n"
" /_ __/____(_)___ (_) /___ __\n"
" / / / ___/ / __ \/ / __/ / / /\n"
" / / / / / / / / / / /_/ /_/ / \n"
" /_/ /_/ /_/_/ /_/_/\__/\__, / \n"
" /____/ "
)
TRINITY_AMBIGIOUS_FILESYSTEM_INFO = (
"Could not initialize data directory\n\n"
" One of these conditions must be met:\n"
" * HOME environment variable set\n"
" * XDG_TRINITY_ROOT environment variable set\n"
" * TRINITY_DATA_DIR environment variable set\n"
" * --data-dir command line argument is passed\n"
"\n"
" In case the data directory is outside of the trinity root directory\n"
" Make sure all paths are pre-initialized as Trinity won't attempt\n"
" to create directories outside of the trinity root directory\n"
)
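# Illustrative shell invocation satisfying one of the conditions above
# (assumption: the installed console script is called ``trinity``):
#
#   XDG_TRINITY_ROOT=/opt/trinity trinity --data-dir /opt/trinity/mainnet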
def main() -> None:
plugin_manager = setup_plugins()
plugin_manager.amend_argparser_config(parser)
args = parser.parse_args()
log_level = getattr(logging, args.log_level.upper())
if args.network_id not in PRECONFIGURED_NETWORKS:
raise NotImplementedError(
"Unsupported network id: {0}. Only the ropsten and mainnet "
"networks are supported.".format(args.network_id)
)
logger, formatter, handler_stream = setup_trinity_stdout_logging(log_level)
try:
chain_config = ChainConfig.from_parser_args(args)
except AmbigiousFileSystem:
exit_because_ambigious_filesystem(logger)
if not is_data_dir_initialized(chain_config):
# TODO: this will only work as is for chains with known genesis
# parameters. Need to flesh out how genesis parameters for custom
# chains are defined and passed around.
try:
initialize_data_dir(chain_config)
except AmbigiousFileSystem:
exit_because_ambigious_filesystem(logger)
except MissingPath as e:
msg = (
"\n"
"It appears that {} does not exist.\n"
"Trinity does not attempt to create directories outside of its root path\n"
"Either manually create the path or ensure you are using a data directory\n"
"inside the XDG_TRINITY_ROOT path"
).format(e.path)
logger.error(msg)
sys.exit(1)
logger, log_queue, listener = setup_trinity_file_and_queue_logging(
logger,
formatter,
handler_stream,
chain_config,
log_level
)
display_launch_logs(chain_config)
# if console command, run the trinity CLI
if args.subcommand == 'attach':
run_console(chain_config, not args.vanilla_shell)
sys.exit(0)
# start the listener thread to handle logs produced by other processes in
# the local logger.
listener.start()
extra_kwargs = {
'log_queue': log_queue,
'log_level': log_level,
'profile': args.profile,
}
# First initialize the database process.
database_server_process = ctx.Process(
target=run_database_process,
args=(
chain_config,
LevelDB,
),
kwargs=extra_kwargs,
)
networking_process = ctx.Process(
target=launch_node,
args=(args, chain_config, ),
kwargs=extra_kwargs,
)
# start the processes
database_server_process.start()
logger.info("Started DB server process (pid=%d)", database_server_process.pid)
wait_for_ipc(chain_config.database_ipc_path)
networking_process.start()
logger.info("Started networking process (pid=%d)", networking_process.pid)
try:
if args.subcommand == 'console':
run_console(chain_config, not args.vanilla_shell)
else:
networking_process.join()
except KeyboardInterrupt:
# When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
# foreground *process group*, so both our networking and database processes will terminate
# at the same time and not sequentially as we'd like. That shouldn't be a problem but if
# we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
# https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
# process' signal handler to wait until the DB process has terminated before doing its
# thing.
# Notice that we still need the kill_process_gracefully() calls here, for when the user
# simply uses 'kill' to send a signal to the main process, but also because they will
# perform a non-gracefull shutdown if the process takes too long to terminate.
logger.info('Keyboard Interrupt: Stopping')
kill_process_gracefully(database_server_process, logger)
logger.info('DB server process (pid=%d) terminated', database_server_process.pid)
# XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
# join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
import time; time.sleep(0.2) # noqa: E702
kill_process_gracefully(networking_process, logger)
logger.info('Networking process (pid=%d) terminated', networking_process.pid)
def run_console(chain_config: ChainConfig, vanilla_shell_args: bool) -> None:
logger = logging.getLogger("trinity")
try:
console(chain_config.jsonrpc_ipc_path, use_ipython=vanilla_shell_args)
except FileNotFoundError as err:
logger.error(str(err))
sys.exit(1)
@setup_cprofiler('run_database_process')
@with_queued_logging
def run_database_process(chain_config: ChainConfig, db_class: Type[BaseDB]) -> None:
base_db = db_class(db_path=chain_config.database_dir)
serve_chaindb(chain_config, base_db)
def exit_because_ambigious_filesystem(logger: logging.Logger) -> None:
logger.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
sys.exit(1)
async def exit_on_signal(service_to_exit: BaseService) -> None:
loop = asyncio.get_event_loop()
sigint_received = asyncio.Event()
for sig in [signal.SIGINT, signal.SIGTERM]:
# TODO also support Windows
loop.add_signal_handler(sig, sigint_received.set)
await sigint_received.wait()
try:
await service_to_exit.cancel()
finally:
loop.stop()
@setup_cprofiler('launch_node')
@with_queued_logging
def launch_node(args: Namespace, chain_config: ChainConfig) -> None:
NodeClass = chain_config.node_class
# Temporary hack: We setup a second instance of the PluginManager.
# The first instance was only to configure the ArgumentParser whereas
# for now, the second instance that lives inside the networking process
# performs the bulk of the work. In the future, the PluginManager
# should probably live in its own process and manage whether plugins
# run in the shared plugin process or spawn their own.
plugin_manager = setup_plugins()
plugin_manager.broadcast(TrinityStartupEvent(
args,
chain_config
))
node = NodeClass(plugin_manager, chain_config)
run_service_until_quit(node)
def display_launch_logs(chain_config: ChainConfig) -> None:
logger = logging.getLogger('trinity')
logger.info(TRINITY_HEADER)
logger.info(construct_trinity_client_identifier())
logger.info("Trinity DEBUG log file is created at %s", str(chain_config.logfile_path))
def run_service_until_quit(service: BaseService) -> None:
loop = asyncio.get_event_loop()
asyncio.ensure_future(exit_on_signal(service))
asyncio.ensure_future(service.run())
loop.run_forever()
loop.close()
def setup_plugins() -> PluginManager:
plugin_manager = PluginManager()
# TODO: Implement auto-discovery of plugins based on some convention/configuration scheme
plugin_manager.register(ENABLED_PLUGINS)
return plugin_manager
|
py | 1a364389e5b7c98f9ac435300c02fae4ad9d0e01 | # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************* AutodiffComposition *************************************************
"""
.. _AutodiffComposition_Overview:
Overview
--------
AutodiffComposition is a subclass of `Composition <Composition>` that trains models more quickly by integrating with
`PyTorch <https://pytorch.org/>`_, a popular machine learning library. In situations with training,
AutodiffComposition is used similarly to a Composition, but is much faster.
The `xor_in_psyneulink_and_pytorch.py` script (in the Scripts folder of the PsyNeuLink source code) is an example of
how to use AutodiffComposition. The script also gives a comparison of runtimes.
.. _AutodiffComposition_Creation:
Creating an AutodiffComposition
-------------------------------
An AutodiffComposition can be created by calling the constructor, and then adding `Components <Component>` using the
add methods of its parent class `Composition`. The most unusual argument in initialization is
**param_init_from_pnl**, which controls how parameters are set up for the internal PyTorch representation of the model.
If set to True:
* Only weight parameters that correspond to projections are created. No trainable bias parameters are created, as they
don’t exist for the autodiff composition’s mechanisms.
* The weight parameters are initialized to be perfectly identical to the autodiff composition’s projections - the
tensor of the parameter object corresponding to a particular projection not only has the same dimensionality as
the projection’s matrix, it has the same exact values.
* Pytorch functions representing mechanism functions incorporate their scalar, untrainable biases.
If set to False:
* Both weight parameters corresponding to projections and trainable bias parameters for mechanisms are created.
* Weight parameters have the same dimensionality as their corresponding projections. However, their values - and those
of the bias parameters - are sampled from a random distribution.
* Though trainable biases now exist, Pytorch functions representing mechanism functions still incorporate their scalar,
untrainable biases.
.. warning:: Do not add or remove Mechanisms or Projections to an AutodiffComposition after it has been run for the
first time. Unlike an ordinary Composition, AutodiffComposition does not support this functionality.
Two other initialization arguments, **patience** and **min_delta**, allow the model to halt training early. The
model tracks how many consecutive 'bad' epochs of training have failed to significantly reduce the model's loss. Once
this number exceeds **patience**, the model stops training. By default, **patience** is ``None``, and the model
will train for the number of specified epochs and will not stop training early.
**min_delta** defines what threshold counts as a significant reduction in model loss. By default it is zero, in which
case any reduction in loss counts as a significant reduction. If **min_delta** is large and positive, the model tends to
stop earlier because it views fewer epochs as 'good'.
**learning_rate** specifies the learning rate for this run (default 0.001), which is passed to the **optimizer**
argument. **optimizer** specifies the kind of optimizer used in training. The current options are 'sgd' (the default)
or 'adam'.
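For instance, a minimal sketch combining these settings (illustrative only, not a prescribed configuration)::
    my_autodiff = pnl.AutodiffComposition(patience=5, min_delta=0.001,
                                          learning_rate=0.01, optimizer_type='adam')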
**learning_enabled** specifies whether the AutodiffComposition should learn, and it defaults to True. When True, the
AutodiffComposition trains using PyTorch, as normal. When False, the AutodiffComposition acts like an ordinary
Composition, which does not change weights. `learning_enabled <AutodiffComposition.learning_enabled>` is also an
attribute, which can be toggled between runs.
**optimizer_type** specifies the kind of optimizer used in training. The current options are 'sgd' (which is the
default) or 'adam'.
**weight_decay** specifies the L2 penalty (which discourages large weights) used by the optimizer. This defaults to 0.
**loss_spec** specifies the loss function for training. It can be a string or a PyTorch loss function. The current
options for strings are 'mse' (the default), 'crossentropy', 'l1', 'nll', 'poissonnll', and 'kldiv'. These refer to
Mean Squared Error, Cross Entropy, L1 loss, Negative Log Likelihood loss, Poisson Negative Log Likelihood, and KL
Divergence respectively. The **loss_spec** can also be any PyTorch loss function, including a custom-written one. For a
list of PyTorch loss functions, see https://pytorch.org/docs/stable/nn.html#loss-functions. For information on writing
a custom loss function, see https://pytorch.org/docs/master/notes/extending.html and
https://discuss.pytorch.org/t/build-your-own-loss-function-in-pytorch/235
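As a minimal sketch (illustrative; any object implementing the PyTorch loss interface can be passed)::
    import torch.nn as nn
    my_autodiff = pnl.AutodiffComposition(loss_spec=nn.SmoothL1Loss(reduction='sum'))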
**randomize** specifies whether the order of inputs will be randomized in each epoch. (In each epoch, all inputs are
run, but if **randomize** is True then the order in which inputs are within an epoch is random.)
**refresh_losses** specifies whether the `losses` attribute is refreshed for each call to `run()`. If False, the losses
of each run are appended to the `losses` attribute. If True, the losses of each run overwrite `losses` instead.
**force_no_retain_graph** defaults to False. If True, the AutodiffComposition does not use the `retain_graph` option
when computing PyTorch gradient. This can reduce memory usage. However, it breaks recurrent networks, so it should only
be used when the network is not recurrent.
.. note::
    The AutodiffComposition detaches all gradients between epochs of training. For more information on why this is done,
see `here <bit.ly/2t2ZkyR>` or `here <bit.ly/2RGuMNg>`.
.. _AutodiffComposition_Structure:
Structure
---------
AutodiffComposition has all the attributes of its parent class `Composition`, in addition to several more.
The `target_CIM <AutodiffComposition.target_CIM>` attribute is analogous to the `input_CIM <Composition.input_CIM>` of
any Composition, but instead of providing inputs, provides targets for the AutodiffComposition.
The `pytorch_representation <AutodiffComposition.pytorch_representation>` attribute holds the PyTorch representation
of the PsyNeuLink model that AutodiffComposition contains.
The `losses <AutodiffComposition.losses>` attribute tracks the average loss for each training epoch.
As mentioned above, the `learning_enabled <AutodiffComposition.learning_enabled>` attribute can be toggled to determine
whether the AutodiffComposition learns or whether it executes like an ordinary Composition.
The `optimizer <AutodiffComposition.optimizer>` attribute contains the PyTorch optimizer function used for learning. It
is determined at initialization by the **optimizer_type**, **learning_rate**, and **weight_decay** arguments.
The `loss <AutodiffComposition.loss>` attribute contains the PyTorch loss function used for learning. It is determined
at initialization by the **loss_spec** argument.
.. _AutodiffComposition_Execution:
Execution
---------
Most arguments to AutodiffComposition's `run` or `execute` methods are the same as in a Composition. When
`learning_enabled <AutodiffComposition.learning_enabled>` is False, the arguments are the same, since in this
case the AutodiffComposition executes like a Composition.
However, if `learning_enabled <AutodiffComposition.learning_enabled>` is True, the **inputs** argument
format is different. If `learning_enabled <AutodiffComposition.learning_enabled>` is True, then **inputs** should be a
dictionary with required keys "inputs" and "targets", and optional key "epochs". The value at "inputs" should be a
dictionary relating origin mechanisms to their inputs. The value at "targets" should be a dictionary relating terminal
mechanisms to their inputs. The value at "epochs" is an integer stating the number of epochs of training (i.e. how many
times all inputs and targets are run). It defaults to 1. Here is an example of creating a simple AutodiffComposition
and specifying inputs and targets:
>>> import psyneulink as pnl
>>> # set up PsyNeuLink Components
>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3)
>>> my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2)
>>> my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2),
... sender=my_mech_1,
... receiver=my_mech_2)
>>> # create AutodiffComposition
>>> my_autodiff = pnl.AutodiffComposition()
>>> my_autodiff.add_node(my_mech_1)
>>> my_autodiff.add_node(my_mech_2)
>>> my_autodiff.add_projection(sender=my_mech_1, projection=my_projection, receiver=my_mech_2)
>>> # input specification
>>> my_inputs = {my_mech_1: [[1, 2, 3]]}
>>> my_targets = {my_mech_2: [[4, 5]]}
>>> input_dict = {"inputs": my_inputs, "targets": my_targets, "epochs": 2}
>>> my_autodiff.run(inputs = input_dict)
Logging
-------
Logging currently works differently in AutodiffComposition than in Composition. In an AutodiffComposition, no logging
is done by default, because logging substantially (roughly by 30%) slows down AutodiffComposition. If you wish for all
projection weights and mechanism values to be logged during execution or training of AutodiffComposition, you must
set the **do_logging** argument of the ``run()`` method to ``True``. Logging with AutodiffComposition is slightly hacked
together, so the time and context in the log are not meaningful, only the logged value is meaningful.
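For example, assuming the ``my_autodiff`` and ``input_dict`` objects defined above (illustrative)::
    my_autodiff.run(inputs=input_dict, do_logging=True)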
Nested Execution
----------------
COMMENT:
Need to add link to docs about nesting ordinary Compositions, once those docs are written.
COMMENT
In general, an AutodiffComposition may be nested inside another Composition, like ordinary Composition nesting. However,
there are a few differences. The input format of an AutodiffComposition with learning enabled is quite unusual. Thus,
when learning is enabled, the AutodiffComposition must be an origin mechanism of the Composition.
.. note::
Like with all nested Compositions, you must call an AutodiffComposition's ``_analyze_graph()`` method
(or execute the AutodiffComposition) before nesting it.
However, when learning is not enabled, AutodiffComposition works just like an ordinary Composition, in theory. Thus, an
AutodiffComposition with learning not enabled receives input in the same format as an ordinary Composition, and can
therefore be placed anywhere in a Composition.
.. note::
Using an AutodiffComposition not as an origin mechanism is currently buggy, and might produce unexpected results.
Below is an example script showing how to nest an AutodiffComposition with learning enabled.
>>> import psyneulink as pnl
>>> # set up PsyNeuLink Components
>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3)
>>> my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2)
>>> my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2),
... sender=my_mech_1,
... receiver=my_mech_2)
>>> # create AutodiffComposition
>>> my_autodiff = pnl.AutodiffComposition()
>>> my_autodiff.add_node(my_mech_1)
>>> my_autodiff.add_node(my_mech_2)
>>> my_autodiff.add_projection(sender=my_mech_1, projection=my_projection, receiver=my_mech_2)
>>> my_autodiff._analyze_graph() # alternatively, my_autodiff.run( ... )
>>>
>>> # input specification
>>> my_inputs = {my_mech_1: [[1, 2, 3]]}
>>> my_targets = {my_mech_2: [[4, 5]]}
>>> input_dict = {"inputs": my_inputs, "targets": my_targets, "epochs": 2}
>>>
>>> parentComposition = pnl.Composition()
>>> parentComposition.add_node(my_autodiff)
>>>
>>> training_input = {my_autodiff: input_dict}
>>> result1 = parentComposition.run(inputs=training_input)
>>>
>>> my_autodiff.learning_enabled = False
>>> no_training_input = {my_autodiff: my_inputs}
>>> result2 = parentComposition.run(inputs=no_training_input)
.. _Composition_Class_Reference:
Class Reference
---------------
"""
from psyneulink.core.components.functions.transferfunctions import Linear, Logistic, ReLU
from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.compositions.composition import Composition
from psyneulink.core.compositions.composition import CompositionError
from psyneulink.core.globals.context import ContextFlags
from psyneulink.core.globals.keywords import SOFT_CLAMP
from psyneulink.core.scheduling.scheduler import Scheduler
import numpy as np
import copy
from collections import Iterable
from toposort import toposort
import logging
try:
import torch
from torch import nn
import torch.optim as optim
from psyneulink.library.compositions.pytorchmodelcreator import PytorchModelCreator
torch_available = True
except ImportError:
torch_available = False
logger = logging.getLogger(__name__)
__all__ = [
'AutodiffComposition', 'AutodiffCompositionError'
]
class AutodiffCompositionError(CompositionError):
def __init__(self, error_value):
self.error_value = error_value
def __str__(self):
return repr(self.error_value)
class AutodiffComposition(Composition):
"""
AutodiffComposition( \
param_init_from_pnl=True, \
patience=None, \
min_delta=0, \
learning_rate=0.001, \
learning_enabled=True, \
optimizer_type=None, \
loss_spec=None, \
randomize=False, \
refresh_losses=False, \
name="autodiff_composition")
Subclass of `Composition` that trains models more quickly by integrating with PyTorch.
Arguments
---------
param_init_from_pnl : boolean : default True
a Boolean specifying how parameters are initialized. (See
`Creating an AutodiffComposition <AutodiffComposition_Creation>` for details)
patience : int or None : default None
**patience** allows the model to stop training early, if training stops reducing loss. The model tracks how many
consecutive epochs of training have failed to reduce the model's loss. When this number exceeds **patience**,
the model stops training early. If **patience** is ``None``, the model will train for the number
of specified epochs and will not stop training early.
min_delta : float : default 0
the minimum reduction in average loss that an epoch must provide in order to qualify as a 'good' epoch.
Used for early stopping of training, in combination with **patience**.
learning_rate : float : default 0.001
the learning rate, which is passed to the optimizer.
learning_enabled : boolean : default True
specifies whether the AutodiffComposition should learn. When True, the AutodiffComposition trains using PyTorch.
When False, the AutodiffComposition executes just like an ordinary Composition
optimizer_type : str : default 'sgd'
the kind of optimizer used in training. The current options are 'sgd' or 'adam'.
weight_decay : float : default 0
specifies the L2 penalty (which discourages large weights) used by the optimizer.
loss_spec : str or PyTorch loss function : default 'mse'
specifies the loss function for training. The current string options are 'mse' (the default), 'crossentropy',
'l1', 'nll', 'poissonnll', and 'kldiv'. Any PyTorch loss function can work here, such as ones from
https://pytorch.org/docs/stable/nn.html#loss-functions
randomize: boolean : default False
specifies whether the order of inputs will be randomized in each epoch. (In each epoch, all inputs are run, but
if **randomize** is True then the order of inputs within an epoch is random.)
refresh_losses : boolean: default False
specifies whether the `losses` attribute is refreshed for each call to `run()`. If False, the losses of each run
are appended to the `losses` attribute. If True, the losses of each run overwrite `losses` instead.
Attributes
----------
pytorch_representation : PytorchModelCreator
the PyTorch representation of the PsyNeuLink model
losses : list of floats
tracks the average loss for each training epoch
patience : int or None : default None
allows the model to stop training early, if training stops reducing loss. The model tracks how many
consecutive epochs of training have failed to reduce the model's loss. When this number exceeds **patience**,
the model stops training early. If **patience** is ``None``, the model will train for the number
of specified epochs and will not stop training early.
min_delta : float : default 0
the minimum reduction in average loss that an epoch must provide in order to qualify as a 'good' epoch.
Used for early stopping of training, in combination with **patience**.
learning_enabled : boolean : default True
specifies whether the AutodiffComposition should learn. When True, the AutodiffComposition trains using PyTorch.
When False, the AutodiffComposition executes just like an ordinary Composition. This attribute can be toggled.
learning_rate : float: default 0.001
the learning rate for training. Currently only used to initialize the `optimizer` attribute.
optimizer : PyTorch optimizer function
the optimizer used for training. Depends on the **optimizer_type**, **learning_rate**, and **weight_decay**
arguments from initialization.
loss : PyTorch loss function
the loss function used for training. Depends on the **loss_spec** argument from initialization.
    name : str : default "autodiff_composition"
        the name of the Composition.
        Specified in the **name** argument of the constructor for the Composition;
        if not specified, a default is assigned automatically
        (see :doc:`Registry <LINK>` for conventions used in naming, including for default and duplicate names).
Returns
-------
instance of AutodiffComposition : AutodiffComposition
"""
class Parameters(Composition.Parameters):
"""
Attributes
----------
learning_rate
see `learning_rate <AutodiffComposition.learning_rate>`
:default value: 0.001
:type: float
losses
see `losses <AutodiffComposition.losses>`
:default value: None
:type:
min_delta
see `min_delta <AutodiffComposition.min_delta>`
:default value: 0
:type: int
optimizer
see `optimizer <AutodiffComposition.optimizer>`
:default value: None
:type:
patience
see `patience <AutodiffComposition.patience>`
:default value: None
:type:
pytorch_representation
see `pytorch_representation <AutodiffComposition.pytorch_representation>`
:default value: None
:type:
"""
optimizer = None
learning_rate = .001
losses = None
patience = None
min_delta = 0
pytorch_representation = None
# TODO (CW 9/28/18): add compositions to registry so default arg for name is no longer needed
def __init__(self,
param_init_from_pnl=True,
patience=None,
min_delta=0,
learning_rate=0.001,
learning_enabled=True,
optimizer_type='sgd',
weight_decay=0,
loss_spec='mse',
randomize=None,
refresh_losses=False,
disable_cuda=False,
cuda_index=None,
force_no_retain_graph=False,
name="autodiff_composition"):
self.learning_enabled = True
if not torch_available:
raise AutodiffCompositionError('Pytorch python module (torch) is not installed. Please install it with '
'`pip install torch` or `pip3 install torch`')
# params = self._assign_args_to_param_dicts(learning_rate=learning_rate)
# since this does not pass params argument, defaults will not be automatically set..
super(AutodiffComposition, self).__init__(name=name)
# super(AutodiffComposition, self).__init__(params=params, name=name)
self.learning_enabled = learning_enabled
self.optimizer_type = optimizer_type
self.loss_spec = loss_spec
self.randomize = randomize
self.refresh_losses = refresh_losses
# pytorch representation of model and associated training parameters
self.pytorch_representation = None
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.optimizer = None
self.loss = None
self.force_no_retain_graph = force_no_retain_graph
# user indication of how to initialize pytorch parameters
self.param_init_from_pnl = param_init_from_pnl
# keeps track of average loss per epoch
self.losses = []
# ordered execution sets for the pytorch model
self.execution_sets = None
# patience is the "bad" epochs (with no progress in average loss) the model tolerates in one training session
# before ending training
self.patience = patience
self.min_delta = min_delta
# CW 11/1/18: maybe we should make scheduler a property, like in Composition
self.scheduler = None
if not disable_cuda and torch.cuda.is_available():
if cuda_index is None:
self.device = torch.device('cuda')
else:
self.device = torch.device('cuda:' + cuda_index)
else:
self.device = torch.device('cpu')
# CLEANUP: move some of what's done in the methods below to a "validate_params" type of method
def _build_pytorch_representation(self, execution_id = None):
if self.scheduler is None: # if learning_enabled has never been run yet
self.scheduler = Scheduler(graph=self.graph_processing)
if self.execution_sets is None:
self.execution_sets = list(self.scheduler.run())
if self.parameters.pytorch_representation.get(execution_id) is None:
model = PytorchModelCreator(self.graph_processing,
self.param_init_from_pnl,
self.execution_sets,
self.device,
execution_id)
self.parameters.pytorch_representation.set(model, execution_id)
# Set up optimizer function
old_opt = self.parameters.optimizer.get(execution_id)
if old_opt is not None:
logger.warning("Overwriting optimizer for AutodiffComposition {}! Old optimizer: {}".format(
self, old_opt))
opt = self._make_optimizer(self.optimizer_type, self.learning_rate, self.weight_decay, execution_id)
self.parameters.optimizer.set(opt, execution_id)
# Set up loss function
if self.loss is not None:
logger.warning("Overwriting loss function for AutodiffComposition {}! Old loss function: {}".format(
self, self.loss))
self.loss = self._get_loss(self.loss_spec)
def _make_optimizer(self, optimizer_type, learning_rate, weight_decay, execution_id):
if not isinstance(learning_rate, (int, float)):
raise AutodiffCompositionError("Learning rate must be an integer or float value.")
if optimizer_type not in ['sgd', 'adam']:
raise AutodiffCompositionError("Invalid optimizer specified. Optimizer argument must be a string. "
"Currently, Stochastic Gradient Descent and Adam are the only available "
"optimizers (specified as 'sgd' or 'adam').")
params = self.parameters.pytorch_representation.get(execution_id).parameters()
if optimizer_type == 'sgd':
return optim.SGD(params, lr=learning_rate, weight_decay=weight_decay)
else:
return optim.Adam(params, lr=learning_rate, weight_decay=weight_decay)
def _get_loss(self, loss_spec):
if not isinstance(self.loss_spec, str):
return self.loss_spec
elif loss_spec == 'mse':
return nn.MSELoss(reduction='sum')
elif loss_spec == 'crossentropy':
return nn.CrossEntropyLoss(reduction='sum')
elif loss_spec == 'l1':
return nn.L1Loss(reduction='sum')
elif loss_spec == 'nll':
return nn.NLLLoss(reduction='sum')
elif loss_spec == 'poissonnll':
return nn.PoissonNLLLoss(reduction='sum')
elif loss_spec == 'kldiv':
return nn.KLDivLoss(reduction='sum')
else:
raise AutodiffCompositionError("Loss type {} not recognized. Loss argument must be a string or function. "
"Currently, the recognized loss types are Mean Squared Error, Cross Entropy,"
" L1 loss, Negative Log Likelihood loss, Poisson Negative Log Likelihood, "
"and KL Divergence. These are specified as 'mse', 'crossentropy', 'l1', "
"'nll', 'poissonnll', and 'kldiv' respectively.".format(loss_spec))
def _has_required_keys(self, input_dict):
required_keys = {"inputs", "targets"}
return required_keys.issubset(set(input_dict.keys()))
def _adjust_stimulus_dict(self, inputs):
if self.learning_enabled:
if isinstance(inputs, dict):
if self._has_required_keys(inputs):
return [inputs]
raise AutodiffCompositionError("Invalid input specification.")
elif isinstance(inputs, list):
for input_dict in inputs:
if not self._has_required_keys(input_dict):
raise AutodiffCompositionError("Invalid input specification.")
return inputs
return super(AutodiffComposition, self)._adjust_stimulus_dict(inputs)
# performs forward computation for one input
def autodiff_processing(self, inputs, execution_id=None, do_logging=False):
pytorch_representation = self.parameters.pytorch_representation.get(execution_id)
# run the model on inputs - switch autograd off for this (we don't need it)
with torch.no_grad():
tensor_outputs = pytorch_representation.forward(inputs, execution_id=execution_id, do_logging=do_logging)
# get outputs back into numpy
outputs = []
for i in range(len(tensor_outputs)):
outputs.append(tensor_outputs[i].numpy().copy())
return outputs
    # performs learning/training on all input-target pairs it receives for given number of epochs
def autodiff_training(self, inputs, targets, epochs, execution_id=None, do_logging=False):
# FIX CW 11/1/18: this value of num_inputs assumes all inputs have same length, and that the length of
# the input for an origin component equals the number of desired trials. We could clean this up
# by perhaps using modular arithmetic on t, or by being more explicit about number of desired trials
first_input_value = list(inputs.values())[0]
num_inputs = len(first_input_value)
patience = self.parameters.patience.get(execution_id)
if patience is not None:
# set up object for early stopping
early_stopper = EarlyStopping(patience=patience, min_delta=self.parameters.min_delta.get(execution_id))
# if training over trial sets in random order, set up array for mapping random order back to original order
if self.randomize:
rand_train_order_reverse = np.zeros(num_inputs)
# get total number of output neurons from the dimensionality of targets on the first trial
# (this is for computing average loss across neurons on each trial later)
out_size = 0
for target in targets.values():
out_size += len(target)
# iterate over epochs
for epoch in range(epochs):
# if training in random order, generate random order and set up mapping
# from random order back to original order
if self.randomize:
rand_train_order = np.random.permutation(num_inputs)
rand_train_order_reverse[rand_train_order] = np.arange(num_inputs)
# set up array to keep track of losses on epoch
curr_losses = np.zeros(num_inputs)
# reset temporary list to keep track of most recent outputs
outputs = []
self.parameters.pytorch_representation.get(execution_id).detach_all()
# self.parameters.pytorch_representation.get(execution_id).reset_all()
# iterate over inputs, targets
for t in range(num_inputs):
if self.randomize:
input_index = rand_train_order[t]
else:
input_index = t
curr_tensor_inputs = {}
curr_tensor_targets = {}
for component in inputs.keys():
input = inputs[component][input_index]
curr_tensor_inputs[component] = torch.tensor(input, device=self.device).double()
for component in targets.keys():
target = targets[component][input_index]
curr_tensor_targets[component] = torch.tensor(target, device=self.device).double()
# do forward computation on current inputs
curr_tensor_outputs = self.parameters.pytorch_representation.get(execution_id).forward(
curr_tensor_inputs,
execution_id,
do_logging
)
# compute total loss across output neurons for current trial
curr_loss = torch.zeros(1).double()
for component in curr_tensor_outputs.keys():
# possibly add custom loss option, which is a loss function that takes many args
# (outputs, targets, weights, and more) and returns a scalar
curr_loss += self.loss(curr_tensor_outputs[component], curr_tensor_targets[component])
# save average loss across all output neurons on current trial
curr_losses[t] = (curr_loss[0].item())/out_size
optimizer = self.parameters.optimizer.get(execution_id)
# backpropagate to compute gradients and perform learning update for parameters
optimizer.zero_grad()
curr_loss = curr_loss/2
if self.force_no_retain_graph:
curr_loss.backward(retain_graph=False)
else:
curr_loss.backward(retain_graph=True)
self.parameters.pytorch_representation.get(execution_id).copy_weights_to_psyneulink(execution_id)
optimizer.step()
# save outputs of model if this is final epoch
curr_output_list = []
for input_state in self.output_CIM.input_states:
assert(len(input_state.all_afferents) == 1) # CW 12/05/18, this assert may eventually be outdated
component = input_state.all_afferents[0].sender.owner
curr_output_list.append(curr_tensor_outputs[component].detach().numpy().copy())
# for component in curr_tensor_outputs.keys():
# curr_output_list.append(curr_tensor_outputs[component].detach().numpy().copy())
outputs.append(curr_output_list)
# save average loss on the current epoch
average_loss = np.mean(curr_losses)
self.parameters.losses.get(execution_id).append(average_loss)
# update early stopper with most recent average loss
if self.parameters.patience.get(execution_id) is not None:
should_stop = early_stopper.step(average_loss)
if should_stop:
logger.warning('Stopped training early after {} epochs'.format(epoch))
if self.randomize:
outputs_list = [None] * len(outputs)
for i in range(len(outputs)):
outputs_list[i] = outputs[int(rand_train_order_reverse[i])]
return outputs_list
else:
return outputs
if self.randomize: # save outputs in a list in correct order, return them
outputs_list = [None] * len(outputs)
for i in range(len(outputs)):
outputs_list[i] = outputs[int(rand_train_order_reverse[i])]
return outputs_list
else:
return outputs
def execute(self,
inputs=None,
autodiff_stimuli=None,
do_logging=False,
scheduler_processing=None,
termination_processing=None,
call_before_time_step=None,
call_before_pass=None,
call_after_time_step=None,
call_after_pass=None,
execution_id=None,
base_execution_id=None,
clamp_input=SOFT_CLAMP,
targets=None,
runtime_params=None,
skip_initialization=False,
bin_execute=False,
context=None
):
execution_id = self._assign_execution_ids(execution_id)
if self.learning_enabled:
# TBI: How are we supposed to use base_execution_id and statefulness here?
# TBI: can we call _build_pytorch_representation in _analyze_graph so that pytorch
# model may be modified between runs?
self._analyze_graph() # ADDED by CW 12/17/18: unsure if correct here
self._build_pytorch_representation(execution_id)
autodiff_inputs = inputs["inputs"]
autodiff_targets = inputs["targets"]
autodiff_epochs = 1
if "epochs" in inputs:
autodiff_epochs = inputs["epochs"]
output = self.autodiff_training(autodiff_inputs, autodiff_targets, autodiff_epochs, execution_id, do_logging)
ctx = self.output_CIM.parameters.context.get(execution_id)
# new_ctx = copy.deepcopy(ctx)
# new_ctx.execution_phase = ContextFlags.PROCESSING
# self.output_CIM.parameters.context.set(new_ctx, execution_id=execution_id)
if ctx is not None: # HACK: CW 12/18/18 for some reason context isn't set correctly
ctx.execution_phase = ContextFlags.PROCESSING
# note that output[-1] might not be the truly most recent value
# HACK CW 2/5/19: the line below is a hack. In general, the output_CIM of an AutodiffComposition
# is not having its parameters populated correctly, and this should be fixed in the long run.
self.output_CIM.execute(input=output[-1], execution_id=execution_id, context=ContextFlags.PROCESSING)
return output
# learning not enabled. execute as a normal composition
return super(AutodiffComposition, self).execute(inputs=inputs,
scheduler_processing=scheduler_processing,
termination_processing=termination_processing,
call_before_time_step=call_before_time_step,
call_before_pass=call_before_pass,
call_after_time_step=call_after_time_step,
call_after_pass=call_after_pass,
execution_id=execution_id,
base_execution_id=base_execution_id,
clamp_input=clamp_input,
runtime_params=runtime_params,
skip_initialization=skip_initialization,
bin_execute=bin_execute,
context=context)
# what the user calls for doing processing/training, similar to the run function of the normal composition
def run(
self,
inputs=None,
do_logging=False,
scheduler_processing=None,
termination_processing=None,
execution_id=None,
num_trials=1,
call_before_time_step=None,
call_after_time_step=None,
call_before_pass=None,
call_after_pass=None,
call_before_trial=None,
call_after_trial=None,
clamp_input=SOFT_CLAMP,
bin_execute=False,
initial_values=None,
reinitialize_values=None,
runtime_params=None,
context=None):
# TBI: Handle trials, timesteps, etc
execution_id = self._assign_execution_ids(execution_id)
if self.learning_enabled:
self._analyze_graph()
if self.refresh_losses or (self.parameters.losses.get(execution_id) is None):
self.parameters.losses.set([], execution_id)
adjusted_stimuli = self._adjust_stimulus_dict(inputs)
if num_trials is None:
num_trials = len(adjusted_stimuli)
results = []
for trial_num in range(num_trials):
stimulus_index = trial_num % len(adjusted_stimuli)
trial_output = self.execute(
inputs=adjusted_stimuli[stimulus_index],
execution_id=execution_id,
do_logging=do_logging,
)
results.append(trial_output)
return results
else:
return super(AutodiffComposition, self).run(inputs=inputs,
scheduler_processing=scheduler_processing,
termination_processing=termination_processing,
execution_id=execution_id,
num_trials=num_trials,
call_before_time_step=call_before_time_step,
call_after_time_step=call_after_time_step,
call_before_pass=call_before_pass,
call_after_pass=call_after_pass,
call_before_trial=call_before_trial,
call_after_trial=call_after_trial,
clamp_input=clamp_input,
bin_execute=bin_execute,
initial_values=initial_values,
reinitialize_values=reinitialize_values,
runtime_params=runtime_params,
context=context)
# validates properties of the autodiff composition, and arguments to run, when run is called
def _validate_params(self, targets, epochs):
# set up processing graph and dictionary (for checking if recurrence is present later)
processing_graph = self.graph_processing
topo_dict = {}
# raise error if composition is empty
if len([vert.component for vert in self.graph.vertices]) == 0:
raise AutodiffCompositionError("{0} has no mechanisms or projections to execute."
.format(self.name))
# iterate over nodes in processing graph
for node in processing_graph.vertices:
# raise error if a node is a composition
if isinstance(node.component, Composition):
raise AutodiffCompositionError("{0} was added as a node to {1}. Compositions cannot be "
"added as nodes to Autodiff Compositions."
.format(node.component, self.name))
# raise error if a node's mechanism doesn't have a Linear, Logistic, or ReLU function
if not isinstance(node.component.function, (Linear, Logistic, ReLU)):
raise AutodiffCompositionError("Function {0} of mechanism {1} in {2} is not a valid function "
"for a Autodiff Composition. Functions of mechanisms in "
"Autodiff Compositions can only be Linear, Logistic, or ReLU."
.format(node.component.function, node.component, self.name))
# raise error if a node has more than one input state
if len(node.component.input_states) > 1:
raise AutodiffCompositionError("Mechanism {0} of {1} has more than one input state. Autodiff "
"Compositions only allow mechanisms to have one input state. The "
"dimensionality of this state's value will become the dimensionality of "
"the tensor representing the state's mechanism in the underlying "
"Pytorch model."
.format(node.component, self.name))
# raise error if any parent of current node creates a cycle in the composition (ie. if there's recurrence)
topo_dict[node.component] = set()
for parent in processing_graph.get_parents_from_component(node.component):
topo_dict[node.component].add(parent.component)
try:
list(toposort(topo_dict))
except ValueError:
raise AutodiffCompositionError("Mechanisms {0} and {1} are part of a recurrent path in {2}. "
"Autodiff Compositions currently do not support recurrence."
.format(node.component, parent.component, self.name))
# raise errors if arguments to run are not consistent or we're doing training but there are
# no trainable parameters
if targets is None:
if epochs is not None:
raise AutodiffCompositionError("Number of training epochs specified for {0} but no targets given."
.format(self.name))
else:
if epochs is None:
raise AutodiffCompositionError("Targets specified for {0}, but no number of training epochs given."
.format(self.name))
if len([vert.component for vert in self.graph.vertices if isinstance(vert.component, MappingProjection)]) == 0:
raise AutodiffCompositionError("Targets specified for {0}, but {0} has no trainable parameters."
.format(self.name))
# gives user weights and biases of the model (from the pytorch representation)
def get_parameters(self, execution_id=NotImplemented):
if execution_id is NotImplemented:
execution_id = self.default_execution_id
pytorch_representation = self.parameters.pytorch_representation.get(execution_id)
if pytorch_representation is None:
raise AutodiffCompositionError("{0} has not been run yet so parameters have not been created "
"in Pytorch."
.format(self.name))
weights = pytorch_representation.get_weights_for_projections()
biases = pytorch_representation.get_biases_for_mechanisms()
return weights, biases
class EarlyStopping(object):
def __init__(self, mode='min', min_delta=0, patience=10):
self.mode = mode
self.min_delta = min_delta
self.patience = patience
self.best = None
self.num_bad_epochs = 0
self.is_better = None
self._init_is_better(mode, min_delta)
if patience == 0:
self.is_better = lambda a, b: True
def step(self, metrics):
if self.best is None:
self.best = metrics
return False
if np.isnan(metrics):
return True
if self.is_better(metrics, self.best):
self.num_bad_epochs = 0
self.best = metrics
else:
self.num_bad_epochs += 1
if self.num_bad_epochs >= self.patience:
return True
return False
def _init_is_better(self, mode, min_delta):
if mode not in {'min', 'max'}:
raise ValueError('mode ' + mode + ' is unknown!')
if mode == 'min':
self.is_better = lambda a, best: a < best - min_delta
if mode == 'max':
self.is_better = lambda a, best: a > best + min_delta
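# Illustrative usage sketch (assumption; mirrors how autodiff_training() above drives it):
#
#   stopper = EarlyStopping(mode='min', min_delta=0.0, patience=5)
#   for average_loss in losses_per_epoch:   # hypothetical iterable of floats
#       if stopper.step(average_loss):
#           break                           # patience exhausted -> stop training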
|
py | 1a364425908e916ad8554f9355c56037d134836c | #!/usr/bin/env python3
"""
This file is part of pyspex
https://github.com/rmvanhees/pyspex.git
Create new SPEXone Level-1A product with selected data from original
Copyright (c) 2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
import argparse
from datetime import datetime, timezone
from pathlib import Path
import h5py
import numpy as np
from pyspex.lv1_io import L1Aio
# --------------------------------------------------
def inv_sec_of_day(reference_day, sec_of_day,
epoch=datetime(1970, 1, 1, tzinfo=timezone.utc)):
"""
Convert seconds after midnight to CCSDS timestamps
Parameters
----------
reference_day : datetime object
sec_of_day : ndarray
epoch : datetime object, optional
Returns
-------
tuple holding CCSDS timestamps
"""
offs = np.uint32((reference_day - epoch).total_seconds())
ccsds_sec = sec_of_day.astype('u4')
ccsds_subsec = (65536 * (sec_of_day - ccsds_sec)).astype('u2')
return offs + ccsds_sec, ccsds_subsec
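# Example (illustrative): with reference_day = 2021-01-01 00:00:00 UTC and
# sec_of_day = np.array([12.5]), the returned CCSDS seconds are the epoch offset
# of that day plus 12, and the sub-seconds are 0.5 * 65536 = 32768.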
# --------------------------------------------------
def main():
"""
Main function of this module
"""
parser = argparse.ArgumentParser(
description=('Copy selected data from one SPEXone L1A product'
' into a new SPEXone L1A product'))
parser.add_argument('--verbose', '-v', action='store_true',
help='be verbose, default be silent')
parser.add_argument('--mps_id', nargs='*', type=int, default=None,
help='select on MPS-ID [comma separated?]')
parser.add_argument('--mon_type', default=None,
                        help=('Specify monitoring type identifier: '
                              'MON-DARK, MON-NOISE, MON-NLIN, ...'))
# parser.add_argument('--time', nargs=2, default=None,
# help='select on image time [start, end]')
# parser.add_argument('--', default=None, help='')
parser.add_argument('--out', default='.',
help=('name of directory to store the new Level-1A'
' product, default: current working directory'))
parser.add_argument('l1a_product', default=None,
help='name of SPEXone Level-1A product')
args = parser.parse_args()
if args.verbose:
print(args)
l1a_product = Path(args.l1a_product)
if not l1a_product.is_file():
raise FileNotFoundError(f'File {args.l1a_product} does not exist')
# ToDo: check if SPEXone Level-1a product
# ToDo: implement check on data product
out_dir = Path(args.out)
if not out_dir.is_dir():
out_dir.mkdir(mode=0o755, parents=True)
# ----- read data from orignal product -----
# pylint: disable=no-member, unsubscriptable-object
with h5py.File(l1a_product) as fid:
# read image data, detector telemetry and image attributes
# - datasets: img_data, img_hk, img_id, img_sec, img_subsec
img_sec = fid['/image_attributes/image_CCSDS_sec'][:]
img_subsec = fid['/image_attributes/image_CCSDS_subsec'][:]
# obtain reference date
units = fid['/image_attributes/image_time'].attrs['units']
reference_day = datetime.fromisoformat(units[14:].decode('ascii'))
sec_of_day = fid['/image_attributes/image_time'][:]
img_sec, img_subsec = inv_sec_of_day(reference_day, sec_of_day)
img_id = fid['/image_attributes/image_ID'][:]
img_data = fid['/science_data/detector_images'][:]
img_hk = fid['/science_data/detector_telemetry'][:]
# read engineering data
# - datasets: nomhk_data, nomhk_sec, nomhk_subsec
# - datasets: demhk_data
nomhk_data = fid['/engineering_data/NomHK_telemetry'][:]
if nomhk_data.size > 0:
# obtain reference date
units = fid['/engineering_data/HK_tlm_time'].attrs['units']
print(units)
reference_day = datetime.fromisoformat(units[14:].decode('ascii'))
print(reference_day)
sec_of_day = fid['/engineering_data/HK_tlm_time'][:]
nomhk_sec, nomhk_subsec = inv_sec_of_day(reference_day, sec_of_day)
demhk_data = fid['/engineering_data/DemHK_telemetry'][:]
# read additional attributes:
# - inflight
# - EGSE/OGSE, ...
inflight = not fid.attrs['institution'].startswith(b'SRON')
# gse_data = 'gse_data' in fid
# define dimensions
dims = {'number_of_images': img_data.shape[0],
'samples_per_image': img_data.shape[1],
'hk_packets': nomhk_data.size}
# ----- perform data selection -----
# ToDo: implement data selection
# ----- now we can update the name of the output product -----
# - because the production time has changed
# - and when coverage time is changed
if (out_dir / l1a_product.name).is_file() \
and l1a_product.samefile(out_dir / l1a_product.name):
raise OSError('Output will overwrite original product')
# ----- write new output product with selected data -----
with L1Aio(out_dir / l1a_product.name, dims=dims) as l1a:
# write image data, detector telemetry and image attributes
l1a.fill_science(img_data, img_hk, img_id)
l1a.fill_time(img_sec, img_subsec, group='image_attributes')
# write engineering data
if nomhk_data.size > 0:
l1a.fill_nomhk(nomhk_data)
l1a.fill_time(nomhk_sec, nomhk_subsec, group='engineering_data')
if demhk_data.size > 0:
l1a.fill_demhk(demhk_data)
# write global attributes
l1a.fill_global_attrs(inflight=inflight)
# l1a.set_attr('input_files', [Path(x).name for x in args.file_list])
# copy group with EGSE/OGSE data
# *** DO NOT USE: BREAKS NETCDF4 FORMAT ***
# if gse_data:
# print('copy EGSE/OGSE data')
# with h5py.File(l1a_product) as fid_in:
# with h5py.File(out_dir / l1a_product.name, 'r+') as fid_out:
# fid_out.copy(fid_in['gse_data'], 'gse_data')
# --------------------------------------------------
if __name__ == '__main__':
main()
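# Illustrative invocation (assumption about the script name; the options are those
# defined by the argument parser above):
#
#   python spx_l1a_select.py --verbose --out ./selected SPX_L1A_example.nc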
|
py | 1a36443471870c8294ed9f1c28fe02daa147e9be | # coding: utf-8
import pprint
import re
import six
class Tag:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'key': 'str',
'value': 'str'
}
attribute_map = {
'key': 'key',
'value': 'value'
}
def __init__(self, key=None, value=None):
"""Tag - a model defined in huaweicloud sdk"""
self._key = None
self._value = None
self.discriminator = None
if key is not None:
self.key = key
if value is not None:
self.value = value
@property
def key(self):
"""Gets the key of this Tag.
        Function description: tag key
:return: The key of this Tag.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this Tag.
        Function description: tag key
:param key: The key of this Tag.
:type: str
"""
self._key = key
@property
def value(self):
"""Gets the value of this Tag.
        Function description: tag value
:return: The value of this Tag.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this Tag.
        Function description: tag value
:param value: The value of this Tag.
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Tag):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
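# Illustrative usage sketch (not part of the generated SDK file):
#
#   tag = Tag(key='env', value='prod')
#   tag.to_dict()                        # {'key': 'env', 'value': 'prod'}
#   tag == Tag(key='env', value='prod')  # True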
|
py | 1a36463fa21bfdbf7371d3dea48804c34ad71876 | # coding: utf-8
"""Dumb VPR model development"""
import matplotlib.pyplot as plt
def vpr_median(cc_r, km_above_ml=1100):
"""vpr diffs based on median ze above ml"""
z = cc_r.data.zh.iloc[0, :]
zt = cc_r.cl_data.zh.loc[:, km_above_ml]
cl = cc_r.classes()
mz = z.groupby(cl).median()
mzt = zt.groupby(cl).median()
return mz-mzt
if __name__ == '__main__':
plt.ion()
    # NOTE: ``cc_r`` is assumed to already exist in the interactive namespace
    # (e.g. from a previous radar case-class session); it is not defined in this file.
    vpr = vpr_median(cc_r)
|
py | 1a364657ccdbae605faa9c8c2263d309d620e916 | """
# Copyright 2021 21CN Corporation Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import unittest
from unittest import mock
from pony.orm import db_session, commit
import utils
from config import base_dir
from core.models import AppInsMapper, VmImageInfoMapper, AppPkgMapper
from task.app_instance_task import do_check_stack_status
from task.app_package_task import do_check_package_status
from task.image_task import do_check_image_status, do_download_then_compress_image, do_check_compress_status, \
do_push_image
from tests.resources.test_data import mock_heat_client, mock_glance_client, MockResponse
class TasksTest(unittest.TestCase):
"""
    Unit tests for scheduled tasks
"""
@mock.patch("task.app_instance_task.create_heat_client")
def test_do_check_stack_status(self, create_heat_client):
"""
        Test the task that checks stack status
Returns:
"""
create_heat_client.return_value = mock_heat_client
with db_session:
AppInsMapper(
app_instance_id='appIns01',
host_ip='10.10.10.10',
tenant_id='tenant001',
stack_id='stack001',
operational_status=utils.INSTANTIATING
)
commit()
do_check_stack_status('appIns01')
with db_session:
app_ins_info = AppInsMapper.get(app_instance_id='appIns01')
self.assertEqual(utils.FAILURE, app_ins_info.operational_status)
@mock.patch('task.image_task.add_download_then_compress_image_task')
@mock.patch('task.image_task.create_glance_client')
def test_do_check_image_status(self, create_glance_client, add_download_then_compress_image_task):
"""
Args:
create_glance_client:
Returns:
"""
create_glance_client.return_value = mock_glance_client
add_download_then_compress_image_task.return_value = None
with db_session:
VmImageInfoMapper(
image_id='test_image',
host_ip='10.10.10.10',
image_name='test_image',
status='queued',
tenant_id='test_tenant',
app_package_id='test_package'
)
commit()
do_check_image_status('test_image', '10.10.10.10')
with db_session:
image_info = VmImageInfoMapper.get(image_id='test_image', host_ip='10.10.10.10')
self.assertEqual(utils.ACTIVE, image_info.status)
@mock.patch('task.image_task.add_check_compress_image_task')
@mock.patch('task.image_task.requests')
@mock.patch('task.image_task.create_glance_client')
def test_do_download_then_compress_image(self, create_glance_client, requests, add_check_compress_image_task):
"""
Args:
create_glance_client:
Returns:
"""
create_glance_client.return_value = mock_glance_client
requests.post.return_value = MockResponse({
'status_code': 200,
'json': {
'requestId': 'abcabcabcabc'
}
})
add_check_compress_image_task.return_value = None
with db_session:
VmImageInfoMapper(
image_id='test_image1',
host_ip='10.10.10.11',
image_name='test_image1',
status='active',
tenant_id='test_tenant',
compress_task_status='waiting'
)
commit()
do_download_then_compress_image('test_image1', '10.10.10.11')
with db_session:
image_info = VmImageInfoMapper.get(image_id='test_image1', host_ip='10.10.10.11')
self.assertEqual(utils.COMPRESSING, image_info.compress_task_status)
@mock.patch('task.image_task.add_push_image_task')
@mock.patch('task.image_task.requests')
def test_do_check_compress_status(self, requests, add_push_image_task):
"""
Returns:
"""
requests.get.return_value = MockResponse({
'status_code': 200,
'json': {
'status': 0
}
})
add_push_image_task.return_value = None
with db_session:
VmImageInfoMapper(
image_id='test_image2',
host_ip='10.10.10.10',
image_name='test_image2',
status='active',
tenant_id='test_tenant',
compress_task_status='compressing'
)
commit()
do_check_compress_status('test_image2', '10.10.10.10')
utils.delete_dir(f'{base_dir}/vmImage')
with db_session:
image_info = VmImageInfoMapper.get(image_id='test_image2', host_ip='10.10.10.10')
self.assertEqual(utils.PUSHING, image_info.compress_task_status)
@mock.patch('task.image_task.requests')
def test_do_push_image(self, requests):
requests.post.return_value = MockResponse({
'status_code': 200,
'json': {
'imageId': 'mock_image_id'
}
})
with db_session:
VmImageInfoMapper(
image_id='test_image3',
host_ip='10.10.10.10',
image_name='test_image3',
status='active',
tenant_id='test_tenant',
compress_task_status='pushing'
)
commit()
utils.create_dir(f'{base_dir}/vmImage/10.10.10.10')
with open(f'{base_dir}/vmImage/10.10.10.10/test_image3.qcow2', 'w') as image_file:
image_file.writelines('abcabcabc')
do_push_image('test_image3', '10.10.10.10')
utils.delete_dir(f'{base_dir}/vmImage')
with db_session:
image_info = VmImageInfoMapper.get(image_id='test_image3', host_ip='10.10.10.10')
self.assertEqual(utils.SUCCESS, image_info.compress_task_status)
@mock.patch('task.app_package_task.start_check_package_status')
def test_do_check_package_status(self, start_check_package_status):
start_check_package_status.return_value = None
with db_session:
AppPkgMapper(
app_package_id='app_package_id1',
host_ip='10.10.10.10',
status='uploading'
)
VmImageInfoMapper(
image_id='image_id1',
image_name='image_name1',
tenant_id='tenant001',
app_package_id='app_package_id1',
host_ip='10.10.10.10',
status='active'
)
VmImageInfoMapper(
image_id='image_id2',
image_name='image_name2',
tenant_id='tenant001',
app_package_id='app_package_id1',
host_ip='10.10.10.10',
status='active'
)
commit()
do_check_package_status('app_package_id1', '10.10.10.10')
with db_session:
app_package_info = AppPkgMapper.get(app_package_id='app_package_id1', host_ip='10.10.10.10')
self.assertEqual(utils.UPLOADED, app_package_info.status)
|
py | 1a364781141c2a72dbe3bab2f8cfe8cca606cc42 | """Generated API Documentation sample using
doc_writer_sample.py."""
doc = {
"@context": {
"ApiDocumentation": "hydra:ApiDocumentation",
"description": "hydra:description",
"domain": {
"@id": "rdfs:domain",
"@type": "@id"
},
"expects": {
"@id": "hydra:expects",
"@type": "@id"
},
"hydra": "http://www.w3.org/ns/hydra/core#",
"label": "rdfs:label",
"method": "hydra:method",
"possibleStatus": "hydra:possibleStatus",
"property": {
"@id": "hydra:property",
"@type": "@id"
},
"range": {
"@id": "rdfs:range",
"@type": "@id"
},
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"readonly": "hydra:readonly",
"required": "hydra:required",
"returns": {
"@id": "hydra:returns",
"@type": "@id"
},
"statusCode": "hydra:statusCode",
"statusCodes": "hydra:statusCodes",
"subClassOf": {
"@id": "rdfs:subClassOf",
"@type": "@id"
},
"supportedClass": "hydra:supportedClass",
"supportedOperation": "hydra:supportedOperation",
"supportedProperty": "hydra:supportedProperty",
"title": "hydra:title",
"vocab": "https://hydrus.com/api/vocab#",
"writeonly": "hydra:writeonly"
},
"@id": "https://hydrus.com/api/vocab",
"@type": "ApiDocumentation",
"description": "Description for the API Documentation",
"possibleStatus": [],
"supportedClass": [
{
"@id": "vocab:dummyClass",
"@type": "hydra:Class",
"description": "A dummyClass for demo",
"supportedOperation": [
{
"@type": "http://schema.org/UpdateAction",
"expects": "vocab:dummyClass",
"method": "POST",
"possibleStatus": [
{
"description": "dummyClass updated",
"statusCode": 200
}
],
"returns": "null",
"title": "UpdateClass"
},
{
"@type": "http://schema.org/DeleteAction",
"expects": "null",
"method": "DELETE",
"possibleStatus": [
{
"description": "dummyClass deleted",
"statusCode": 200
}
],
"returns": "null",
"title": "DeleteClass"
},
{
"@type": "http://schema.org/AddAction",
"expects": "vocab:dummyClass",
"method": "PUT",
"possibleStatus": [
{
"description": "dummyClass successfully added",
"statusCode": 201
}
],
"returns": "null",
"title": "AddClass"
},
{
"@type": "http://schema.org/FindAction",
"expects": "null",
"method": "GET",
"possibleStatus": [
{
"description": "dummyClass returned",
"statusCode": 200
}
],
"returns": "vocab:dummyClass",
"title": "GetClass"
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop1",
"writeonly": "true"
},
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop2",
"writeonly": "true"
}
],
"title": "dummyClass"
},
{
"@id": "vocab:extraClass",
"@type": "hydra:Class",
"description": "Class without any explicit methods",
"supportedOperation": [],
"supportedProperty": [],
"title": "extraClass"
},
{
"@id": "vocab:singleClass",
"@type": "hydra:Class",
"description": "A non collection class",
"supportedOperation": [
{
"@type": "http://schema.org/UpdateAction",
"expects": "vocab:singleClass",
"method": "POST",
"possibleStatus": [
{
"description": "singleClass changed",
"statusCode": 200
}
],
"returns": "null",
"title": "UpdateClass"
},
{
"@type": "http://schema.org/DeleteAction",
"expects": "null",
"method": "DELETE",
"possibleStatus": [
{
"description": "singleClass deleted",
"statusCode": 200
}
],
"returns": "null",
"title": "DeleteClass"
},
{
"@type": "http://schema.org/AddAction",
"expects": "vocab:singleClass",
"method": "PUT",
"possibleStatus": [
{
"description": "singleClass successfully added",
"statusCode": 201
}
],
"returns": "null",
"title": "AddClass"
},
{
"@type": "http://schema.org/FindAction",
"expects": "null",
"method": "GET",
"possibleStatus": [
{
"description": "singleClass returned",
"statusCode": 200
}
],
"returns": "vocab:singleClass",
"title": "GetClass"
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop1",
"writeonly": "true"
},
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop2",
"writeonly": "true"
},
{
"@type": "SupportedProperty",
"property": "vocab:dummyClass",
"readonly": "false",
"required": "false",
"title": "dummyProp",
"writeonly": "true"
},
{
"@type": "SupportedProperty",
"property": "vocab:anotherSingleClass",
"readonly": "false",
"required": "false",
"title": "singleClassProp",
"writeonly": "true"
}
],
"title": "singleClass"
},
{
"@id": "vocab:anotherSingleClass",
"@type": "hydra:Class",
"description": "An another non collection class",
"supportedOperation": [
{
"@type": "http://schema.org/FindAction",
"expects": "null",
"method": "GET",
"possibleStatus": [
{
"description": "anotherSingleClass returned",
"statusCode": 200
}
],
"returns": "vocab:anotherSingleClass",
"title": "GetClass"
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop1",
"writeonly": "true"
}
],
"title": "anotherSingleClass"
},
{
"@id": "http://www.w3.org/ns/hydra/core#Resource",
"@type": "hydra:Class",
"description": "null",
"supportedOperation": [],
"supportedProperty": [],
"title": "Resource"
},
{
"@id": "http://www.w3.org/ns/hydra/core#Collection",
"@type": "hydra:Class",
"description": "null",
"supportedOperation": [],
"supportedProperty": [
{
"@type": "SupportedProperty",
"property": "http://www.w3.org/ns/hydra/core#member",
"readonly": "false",
"required": "null",
"title": "members",
"writeonly": "false"
}
],
"title": "Collection"
},
{
"@id": "vocab:dummyClassCollection",
"@type": "hydra:Class",
"description": "A collection of dummyclass",
"subClassOf": "http://www.w3.org/ns/hydra/core#Collection",
"supportedOperation": [
{
"@id": "_:dummyclass_collection_retrieve",
"@type": "http://schema.org/FindAction",
"description": "Retrieves all dummyClass entities",
"expects": "null",
"method": "GET",
"returns": "vocab:dummyClassCollection",
"statusCodes": []
},
{
"@id": "_:dummyclass_create",
"@type": "http://schema.org/AddAction",
"description": "Create new dummyClass entitity",
"expects": "vocab:dummyClass",
"method": "PUT",
"returns": "vocab:dummyClass",
"statusCodes": [
{
"description": "If the dummyClass entity was created successfully.",
"statusCode": 201
}
]
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"description": "The dummyclass",
"property": "http://www.w3.org/ns/hydra/core#member",
"readonly": "false",
"required": "false",
"title": "members",
"writeonly": "false"
}
],
"title": "dummyClassCollection"
},
{
"@id": "vocab:extraClassCollection",
"@type": "hydra:Class",
"description": "A collection of extraclass",
"subClassOf": "http://www.w3.org/ns/hydra/core#Collection",
"supportedOperation": [
{
"@id": "_:extraclass_collection_retrieve",
"@type": "http://schema.org/FindAction",
"description": "Retrieves all extraClass entities",
"expects": "null",
"method": "GET",
"returns": "vocab:extraClassCollection",
"statusCodes": []
},
{
"@id": "_:extraclass_create",
"@type": "http://schema.org/AddAction",
"description": "Create new extraClass entitity",
"expects": "vocab:extraClass",
"method": "PUT",
"returns": "vocab:extraClass",
"statusCodes": [
{
"description": "If the extraClass entity was created successfully.",
"statusCode": 201
}
]
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"description": "The extraclass",
"property": "http://www.w3.org/ns/hydra/core#member",
"readonly": "false",
"required": "false",
"title": "members",
"writeonly": "false"
}
],
"title": "extraClassCollection"
},
{
"@id": "vocab:EntryPoint",
"@type": "hydra:Class",
"description": "The main entry point or homepage of the API.",
"supportedOperation": [
{
"@id": "_:entry_point",
"@type": "http://schema.org/FindAction",
"description": "The APIs main entry point.",
"expects": "null",
"method": "GET",
"returns": "null",
"statusCodes": "vocab:EntryPoint"
}
],
"supportedProperty": [
{
"hydra:description": "The singleClass Class",
"hydra:title": "singleclass",
"property": {
"@id": "vocab:EntryPoint/singleClass",
"@type": "hydra:Link",
"description": "A non collection class",
"domain": "vocab:EntryPoint",
"label": "singleClass",
"range": "vocab:singleClass",
"supportedOperation": [
{
"@id": "updateclass",
"@type": "http://schema.org/UpdateAction",
"description": "null",
"expects": "vocab:singleClass",
"label": "UpdateClass",
"method": "POST",
"returns": "null",
"statusCodes": [
{
"description": "singleClass changed",
"statusCode": 200
}
]
},
{
"@id": "deleteclass",
"@type": "http://schema.org/DeleteAction",
"description": "null",
"expects": "null",
"label": "DeleteClass",
"method": "DELETE",
"returns": "null",
"statusCodes": [
{
"description": "singleClass deleted",
"statusCode": 200
}
]
},
{
"@id": "addclass",
"@type": "http://schema.org/AddAction",
"description": "null",
"expects": "vocab:singleClass",
"label": "AddClass",
"method": "PUT",
"returns": "null",
"statusCodes": [
{
"description": "singleClass successfully added",
"statusCode": 201
}
]
},
{
"@id": "getclass",
"@type": "http://schema.org/FindAction",
"description": "null",
"expects": "null",
"label": "GetClass",
"method": "GET",
"returns": "vocab:singleClass",
"statusCodes": [
{
"description": "singleClass returned",
"statusCode": 200
}
]
}
]
},
"readonly": "true",
"required": "null",
"writeonly": "false"
},
{
"hydra:description": "The anotherSingleClass Class",
"hydra:title": "anothersingleclass",
"property": {
"@id": "vocab:EntryPoint/anotherSingleClass",
"@type": "hydra:Link",
"description": "An another non collection class",
"domain": "vocab:EntryPoint",
"label": "anotherSingleClass",
"range": "vocab:anotherSingleClass",
"supportedOperation": [
{
"@id": "getclass",
"@type": "http://schema.org/FindAction",
"description": "null",
"expects": "null",
"label": "GetClass",
"method": "GET",
"returns": "vocab:anotherSingleClass",
"statusCodes": [
{
"description": "anotherSingleClass returned",
"statusCode": 200
}
]
}
]
},
"readonly": "true",
"required": "null",
"writeonly": "false"
},
{
"hydra:description": "The dummyClassCollection collection",
"hydra:title": "dummyclasscollection",
"property": {
"@id": "vocab:EntryPoint/DcTest",
"@type": "hydra:Link",
"description": "The dummyClassCollection collection",
"domain": "vocab:EntryPoint",
"label": "dummyClassCollection",
"range": "vocab:dummyClassCollection",
"supportedOperation": [
{
"@id": "_:dummyclass_collection_retrieve",
"@type": "http://schema.org/FindAction",
"description": "Retrieves all dummyClass entities",
"expects": "null",
"method": "GET",
"returns": "vocab:dummyClassCollection",
"statusCodes": []
},
{
"@id": "_:dummyclass_create",
"@type": "http://schema.org/AddAction",
"description": "Create new dummyClass entitity",
"expects": "vocab:dummyClass",
"method": "PUT",
"returns": "vocab:dummyClass",
"statusCodes": [
{
"description": "If the dummyClass entity was created successfully.",
"statusCode": 201
}
]
}
]
},
"readonly": "true",
"required": "null",
"writeonly": "false"
},
{
"hydra:description": "The extraClassCollection collection",
"hydra:title": "extraclasscollection",
"property": {
"@id": "vocab:EntryPoint/EcTest",
"@type": "hydra:Link",
"description": "The extraClassCollection collection",
"domain": "vocab:EntryPoint",
"label": "extraClassCollection",
"range": "vocab:extraClassCollection",
"supportedOperation": [
{
"@id": "_:extraclass_collection_retrieve",
"@type": "http://schema.org/FindAction",
"description": "Retrieves all extraClass entities",
"expects": "null",
"method": "GET",
"returns": "vocab:extraClassCollection",
"statusCodes": []
},
{
"@id": "_:extraclass_create",
"@type": "http://schema.org/AddAction",
"description": "Create new extraClass entitity",
"expects": "vocab:extraClass",
"method": "PUT",
"returns": "vocab:extraClass",
"statusCodes": [
{
"description": "If the extraClass entity was created successfully.",
"statusCode": 201
}
]
}
]
},
"readonly": "true",
"required": "null",
"writeonly": "false"
}
],
"title": "EntryPoint"
}
],
"title": "Title for the API Documentation"
}# nopep8 |
py | 1a364855dde05b42c27ab339fbacade832a1043f | # no of motion steps in run
run_steps = 35
# debugging mode with a single particle
single_particle_mode = False
# run particle filter with known starting point
# (always true for single particle mode)
known_starting_point = False
# run on real robot rather than simulating with logged robot data
real_robot_mode = False
motion_noise_on = True
wall_threshold = 5.0
# Use Case #1: Gather sample data from a real robot:
#
# single_particle_mode = True
# real_robot_mode = True
# motion_noise_on = False
# Use Case #2: Simulate single particle using previous measurement data:
#
# single_particle_mode = True
# real_robot_mode = False
# motion_noise_on = False
# Use Case #3: Simulate full particle filter using previous measurement
# data (known initial position):
#
# single_particle_mode = False
# known_starting_point = True
# real_robot_mode = False
# motion_noise_on = True
# Use Case #4: Simulate full particle filter using previous measurement
# data (unknown initial position):
#
# single_particle_mode = False
# known_starting_point = False
# real_robot_mode = False
# motion_noise_on = True
# Use Case #5: Run full particle filter on real robot
# (known initial position):
#
# single_particle_mode = False
# known_starting_point = True
# real_robot_mode = True
# motion_noise_on = True
# Use Case #6: Run full particle filter on real robot
# (unknown initial position):
#
# single_particle_mode = False
# known_starting_point = False
# real_robot_mode = True
# motion_noise_on = True
import time
from math import *
import random
if real_robot_mode:
from ev3.lego import Motor
from ev3.lego import UltrasonicSensor
a = Motor(port=Motor.PORT.A)
d = Motor(port=Motor.PORT.D)
a.reset()
d.reset()
a.position_mode=Motor.POSITION_MODE.RELATIVE
d.position_mode=Motor.POSITION_MODE.RELATIVE
sonar_left = UltrasonicSensor(port=2)
sonar_front = UltrasonicSensor(port=3)
# wall follower PD controller Kp constant
Kp = 0.03
# wall follower PD controller Kd constant
Kd = 0.02
# base wheel power / 10 (degrees/s)
base_power = 30.0
# constrains max/min wheel power levels
power_multiplier = 0.20
# all units are in cm
target_wall_dist = 25.4
wheel_diameter = 6.6
wheel_circumference = pi * wheel_diameter
dist_between_wheels = 11.4
# readings beyond this distance are unreliable (cm)
sonar_max_distance = 200.0
# sonar cone: +/- 25 (deg)
sonar_max_angle = 25.0 * pi / 180.0
# offset from robot center (midpoint between wheels) to front sensor (cm)
dist_front_sensor_to_center = 10.0
# offset from robot center (midpoint between wheels) to left sensor (cm)
dist_left_sensor_to_center = 10.0
# left sensor offset from front (rad)
left_sensor_orientation_offset = pi / 2.0
# world size in cm
world_size_x = 361
world_size_y = 349
# left sensor is normally within target_wall_dist of a wall
sensor_noise_left = 2.0
# front sensor is normally much further from a wall so accuracy goes down
# (NOT CURRENTLY USED)
sensor_noise_front = 5.0
if motion_noise_on:
# applies to any near straight motion or slight turn motion
steering_noise = 0.02
# only applies to true straight motion (ex. after hard left turns at 45 deg)
distance_noise = 0.05
# applies to turning in place (hard left or hard right turns)
turning_noise = 0.05
else:
# applies to any near straight motion or slight turn motion
steering_noise = 0.0
# only applies to true straight motion (ex. after hard left turns at 45 deg)
distance_noise = 0.0
# applies to turning in place (hard left or hard right turns)
turning_noise = 0.0
# avoids being overly aggressive on particle killing
# (sonar readings can be rubbish at times)
# robust_likelihood_constant = 0.5
def power_limit(pwr):
pwr_min = base_power - base_power * power_multiplier
pwr_max = base_power + base_power * power_multiplier
if pwr < pwr_min:
return pwr_min
if pwr > pwr_max:
return pwr_max
return pwr
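# Worked example using the constants above: with base_power = 30.0 and
# power_multiplier = 0.20, pwr_min = 24.0 and pwr_max = 36.0, so
# power_limit(40.0) returns 36.0, power_limit(20.0) returns 24.0, and
# power_limit(30.0) passes through unchanged.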
# Occupancy grid - checks for available positions
class Grid:
def __init__(self, rows, cols):
self.obstacles = []
self.grid = []
for row in xrange(rows): self.grid += [[0]*cols]
def add_obstacle(self, row_start, row_end, col_start, col_end):
for row in xrange(row_start, row_end + 1):
for col in xrange(col_start, col_end + 1):
self.grid[row][col] = 1
def is_available(self, x, y):
if (0 <= x < world_size_x) and (0 <= y < world_size_y):
if self.grid[y][x] == 0:
return True
return False
def wall_penalty(self, x, y):
new_x = x
new_y = y
search_stack = []
directions = [
[ 0, 1],
[ 1, 0],
[-1, 0],
[ 0, -1],
[ 1, 1],
[-1, 1],
[ 1, -1],
[-1, -1]
]
if new_x < 0:
new_x = 0
elif new_x >= world_size_x:
new_x = world_size_x - 1
if new_y < 0:
new_y = 0
elif new_y >= world_size_y:
new_y = world_size_y - 1
tracker = []
for i in range(world_size_y):
tracker.append([])
for j in range(world_size_x):
tracker[i].append(0)
tracker[new_y][new_x] = 1
while self.grid[new_y][new_x] != 0:
for d in range(8):
nx = new_x + directions[d][0]
ny = new_y + directions[d][1]
if ((0 <= nx < world_size_x) and (0 <= ny < world_size_y)
and tracker[ny][nx] == 0 and
sqrt((nx - x)**2 + (ny - y)**2) < wall_threshold):
search_stack.append([ny, nx])
tracker[ny][nx] = 1
if len(search_stack) < 1:
return wall_threshold**2
[new_y, new_x] = search_stack.pop(0)
distance = sqrt((new_x - x)**2 + (new_y - y)**2)
return distance
def print_grid(self):
rows = len(self.grid)
cols = len(self.grid[0])
print "[ ",
for row in reversed(xrange(rows)):
if (row >= 0): print "\n ",
print "[ ",
for col in xrange(cols):
if (col > 0): print ",",
print str(self.grid[row][col]),
print "]",
print "]"
# Robot simulator
class robot_sim:
def __init__(self):
if single_particle_mode or known_starting_point:
# initial known x position for single robot simulator testing
self.x = 35.6
# initial known y position for single robot simulator testing
self.y = 7.6
# initial known orientation for single robot simulator testing
self.orientation = pi / 2
else:
# initial random x position for particle filter
self.x = random.random() * (world_size_x - 1)
# initial random y position for particle filter
self.y = random.random() * (world_size_y - 1)
# initial random choice of orientation for particle filter
# (assume robot always starts facing N, S, W, or E)
self.orientation = random.choice([0.0, pi / 2, pi, 3 * pi / 2])
self.sensor_noise_left = 0.0
self.sensor_noise_front = 0.0
self.steering_noise = 0.0
self.distance_noise = 0.0
self.turning_noise = 0.0
def set(self, new_x, new_y, new_orientation):
if new_orientation < 0 or new_orientation >= 2 * pi:
raise ValueError, 'Orientation must be in [0..2pi]'
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation)
def set_noise(self, new_sensor_noise_left, new_sensor_noise_front,
new_steering_noise, new_distance_noise, new_turning_noise):
self.sensor_noise_left = float(new_sensor_noise_left)
self.sensor_noise_front = float(new_sensor_noise_front)
self.steering_noise = float(new_steering_noise)
self.distance_noise = float(new_distance_noise)
self.turning_noise = float(new_turning_noise)
def find_closest_wall(self, walls, sensor):
orientation_offset = 0.0
sensor_dist_offset = 0.0
if sensor == 'left':
orientation_offset = left_sensor_orientation_offset
sensor_dist_offset = dist_left_sensor_to_center
elif sensor == 'front':
sensor_dist_offset = dist_front_sensor_to_center
close_walls = []
for wall in walls:
on_line_segment = False
x1 = wall[0]
y1 = wall[1]
x2 = wall[2]
y2 = wall[3]
# angle between sensor and wall
angle_to_wall = acos((cos(self.orientation +
orientation_offset) * (y1 - y2) + sin(self.orientation +
orientation_offset) * (x2 - x1)) / sqrt((y1 - y2) ** 2 +
(x2 - x1) ** 2 ))
# check that we don't exceed the sonar cone
if abs(angle_to_wall) <= sonar_max_angle:
# accommodate differences between sensor mount positions and
# robot center (mid-point between wheels)
sensor_x = self.x + sensor_dist_offset * cos(self.orientation
+ orientation_offset)
sensor_y = self.y + sensor_dist_offset * sin(self.orientation
+ orientation_offset)
# forward distance from sensor to wall
dist_to_wall = ((y2 - y1) * (x1 - sensor_x) - (x2 - x1) *
(y1 - sensor_y)) / ((y2 - y1) * cos(self.orientation +
orientation_offset) - (x2 - x1) * sin(self.orientation +
orientation_offset))
# intercept point on wall based on following forward vector
# from sensor
x_intercept_point = sensor_x + dist_to_wall * cos(
self.orientation + orientation_offset)
y_intercept_point = sensor_y + dist_to_wall * sin(
self.orientation + orientation_offset)
# check that intercept point is within the endpoints of the
# wall
if (x1 - x2) == 0:
if ((y1 <= y_intercept_point <= y2) or
(y2 <= y_intercept_point <= y1)):
on_line_segment = True
elif (y1 - y2) == 0:
if ((x1 <= x_intercept_point <= x2) or
(x2 <= x_intercept_point <= x1)):
on_line_segment = True
elif(((x1 <= x_intercept_point <= x2) or
(x2 <= x_intercept_point <= x1)) and
((y1 <= y_intercept_point <= y2) or
(y2 <= y_intercept_point <= y1))):
on_line_segment = True
if(on_line_segment and
(abs(dist_to_wall) <= sonar_max_distance)):
close_walls.append(abs(dist_to_wall))
if not close_walls:
if single_particle_mode:
print 'Sim - Sensor dist, ', sensor, ':', sonar_max_distance
return sonar_max_distance
else:
# choose the closest viable wall
if single_particle_mode:
print 'Sim - Sensor dist, ', sensor, ':', min(close_walls)
return min(close_walls)
def move_time(self, pwr_l, pwr_r, duration):
result = robot_sim()
result.sensor_noise_left = self.sensor_noise_left
result.sensor_noise_front = self.sensor_noise_front
result.steering_noise = self.steering_noise
result.distance_noise = self.distance_noise
result.turning_noise = self.turning_noise
# velocity is in cm/s
velocity_left = pwr_l * 10 * wheel_circumference / 360.0
velocity_right = pwr_r * 10 * wheel_circumference / 360.0
# beta - radius of arc
# R - radius of arc
# dist - distance driven (for true straight motion)
x = 0.0
y = 0.0
orientation = 0.0
if velocity_left == velocity_right: # going straight
# print 'Sim - going straight...'
# add steering noise
orientation = (self.orientation +
random.gauss(0.0, self.steering_noise)) % (2 * pi)
dist = velocity_right * duration / 1000.0
# add distance noise proportional to distance
dist += random.gauss(0.0, self.distance_noise * dist)
x = self.x + dist * cos(orientation)
y = self.y + dist * sin(orientation)
else:
# print 'Sim - slight turn...'
R = dist_between_wheels * (velocity_left + velocity_right) / (2 *
(velocity_right - velocity_left))
# print 'R is: ', R
beta = (velocity_right - velocity_left) * (duration / 1000.0
) / dist_between_wheels
# add steering noise
beta += random.gauss(0.0, self.steering_noise)
# print 'beta is: ', beta
orientation = (self.orientation + beta) % (2 * pi)
cx = self.x - sin(self.orientation) * R
cy = self.y + cos(self.orientation) * R
x = cx + sin(self.orientation + beta) * R
y = cy - cos(self.orientation + beta) * R
result.set(x, y, orientation)
if single_particle_mode:
print 'Sim - straight or slight turn...'
print 'Sim - new x: ', x
print 'Sim - new y: ', y
print 'Sim - new orientation (rad): ', orientation
print 'Sim - new orientation (deg): ', orientation * 180 / pi
return result
def turn_in_place(self, turn_angle, pwr=150.0):
x = 0.0
y = 0.0
orientation = 0.0
result = robot_sim()
result.sensor_noise_left = self.sensor_noise_left
result.sensor_noise_front = self.sensor_noise_front
result.steering_noise = self.steering_noise
result.distance_noise = self.distance_noise
result.turning_noise = self.turning_noise
orientation = (self.orientation + (turn_angle * pi / 180
) + random.gauss(0.0, self.turning_noise)) % (2 * pi)
x = self.x
y = self.y
result.set(x, y, orientation)
if single_particle_mode:
print 'Sim - turn in place...'
print 'Sim - new x: ', x
print 'Sim - new y: ', y
print 'Sim - new orientation (rad): ', orientation
print 'Sim - new orientation (deg): ', orientation * 180 / pi
return result
def measurement_prob(self, measurements, grid):
predicted_measurements = []
predicted_measurements.append(self.find_closest_wall(walls, 'left'))
predicted_measurements.append(self.find_closest_wall(walls, 'front'))
# compute left sensor gaussian error - use sensor_noise_left
error_sense_dist_left = abs(measurements[0] - predicted_measurements[0])
# error_left = (exp(- (error_sense_dist_left ** 2) /
# (self.sensor_noise_left ** 2) / 2.0))
error_left = (exp(- (error_sense_dist_left ** 2) /
(self.sensor_noise_left ** 2) / 2.0) /
sqrt(2.0 * pi * (self.sensor_noise_left ** 2)))
# compute front sensor gaussian error - use sensor_noise_front
error_sense_dist_front = abs(measurements[1] - predicted_measurements[1])
# error_front = (exp(- (error_sense_dist_front ** 2) /
# (self.sensor_noise_front ** 2) / 2.0))
error_front = (exp(- (error_sense_dist_front ** 2) /
(self.sensor_noise_front ** 2) / 2.0) /
sqrt(2.0 * pi * (self.sensor_noise_front ** 2)))
# INCLUDE FRONT SENSOR IN CALCULATION
error = error_left * error_front
wall_penalty = grid.wall_penalty(int(self.x), int(self.y))
# penalize out of bounds particles (includes obstacle locations)
if wall_penalty > 1.0:
error *= wall_penalty
if single_particle_mode:
            print 'Sim - wall penalty: ', wall_penalty
print 'Sim - gaussian error left: ', error_left
print 'Sim - gaussian error front: ', error_front
print 'Sim - gaussian error total: ', error
return error
def __repr__(self):
# return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y),
# str(self.orientation))
return '%.6s %.6s' % (str(self.x), str(self.y))
if real_robot_mode:
class robot_real:
def sense_front(self):
reading1 = sonar_front.dist_cm
reading2 = sonar_front.dist_cm
reading3 = sonar_front.dist_cm
result = min(reading1, reading2, reading3) / 10.0
if result < sonar_max_distance:
return result
else:
return sonar_max_distance
def sense_left(self):
reading1 = sonar_left.dist_cm
reading2 = sonar_left.dist_cm
reading3 = sonar_left.dist_cm
result = min(reading1, reading2, reading3) / 10.0
if result < sonar_max_distance:
return result
else:
return sonar_max_distance
def move_time(self, pwr_l, pwr_r, duration):
a.run_time_limited(time_sp=duration, speed_sp=pwr_l * 10,
regulation_mode=True, stop_mode=Motor.STOP_MODE.BRAKE)
d.run_time_limited(time_sp=duration, speed_sp=pwr_r * 10,
regulation_mode=True, stop_mode=Motor.STOP_MODE.BRAKE)
time.sleep(duration / 1000.0)
def turn_in_place(self, turn_angle, pwr=150.0):
a.reset()
d.reset()
wheel_distance = (turn_angle / 360.0) * pi * dist_between_wheels
wheel_angle = (wheel_distance / wheel_circumference) * 360.0
a.run_position_limited(position_sp=-wheel_angle, speed_sp=pwr,
stop_mode=Motor.STOP_MODE.BRAKE)
d.run_position_limited(position_sp=wheel_angle, speed_sp=pwr,
stop_mode=Motor.STOP_MODE.BRAKE)
time.sleep(abs(wheel_angle/pwr))
def stop(self):
a.stop()
d.stop()
class particle_filter:
def __init__(self, grid, N=500):
self.p = []
self.w = []
if single_particle_mode:
self.count = 1
else:
self.count = N
for i in range(self.count):
r = robot_sim()
# re-create initial particle if it lands on an unavailable spot
while grid.wall_penalty(int(r.x), int(r.y)) >= wall_threshold:
r = robot_sim()
r.set_noise(sensor_noise_left, sensor_noise_front, steering_noise,
distance_noise, turning_noise)
self.p.append(r)
def measurement_update(self, measurements, grid):
for i in range(self.count):
self.w.append(self.p[i].measurement_prob(measurements, grid))
def motion_update(self, motion):
p2 = []
for i in range(self.count):
motion_command = motion[0]
if motion_command == 'turn_in_place':
turn_angle = motion[1]
p2.append(self.p[i].turn_in_place(turn_angle))
if motion_command == 'move_time':
power_left = motion[1]
power_right = motion[2]
duration = motion[3]
p2.append(self.p[i].move_time(power_left, power_right, duration))
self.p = p2
def resample(self):
p3 = []
index = int(random.random() * self.count)
beta = 0.0
mw = max(self.w)
for i in range(self.count):
beta += random.random() * 2.0 * mw
while beta > self.w[index]:
beta -= self.w[index]
index = (index + 1) % self.count
p3.append(self.p[index])
self.p = p3
def get_position(self):
x = 0.0
y = 0.0
orientation = 0.0
for i in range(len(self.p)):
x += self.p[i].x
y += self.p[i].y
orientation += (((self.p[i].orientation - self.p[0].orientation + pi)
% (2.0 * pi)) + self.p[0].orientation - pi)
return [x / len(self.p), y / len(self.p), orientation / len(self.p)]
if real_robot_mode:
# real robot
ev3 = robot_real()
mygrid = Grid(world_size_y, world_size_x)
# bed
mygrid.add_obstacle(130, 348, 0, 106)
# dresser
mygrid.add_obstacle(279, 348, 255, 360)
# table
mygrid.add_obstacle(0, 162, 283, 360)
walls = []
# format of wall is [x1, y1, x2, y2]
walls.append([0, 0, 0, 130]) # wall 1
walls.append([0, 130, 106, 130]) # wall 2
walls.append([106, 130, 106, 348]) # wall 3
walls.append([106, 348, 246, 348]) # wall 4
walls.append([246, 348, 246, 279]) # wall 5
walls.append([246, 279, 360, 279]) # wall 6
walls.append([360, 279, 360, 164]) # wall 7
walls.append([360, 164, 283, 164]) # wall 8
walls.append([283, 164, 283, 0]) # wall 9
walls.append([283, 0, 0, 0]) # wall 10
if real_robot_mode:
# store all measurements
measurement_history = []
else:
measurement_history = [[27.3, 113.5], [27.8, 95.6], [28.1, 77.8],
[29.0, 60.3], [28.1, 42.0], [27.7, 23.6], [21.3, 71.0], [22.5, 200.0],
[23.5, 200.0], [24.1, 183.5], [25.9, 165.7], [200.0, 147.8], [38.3, 200.0],
[28.2, 184.3], [27.3, 167.1], [24.7, 149.9], [22.9, 130.0], [21.1, 112.3],
[19.0, 94.1], [17.5, 76.5], [15.7, 59.8], [14.5, 43.9], [13.7, 34.8],
[33.0, 117.1], [33.4, 99.3], [32.6, 81.4], [29.6, 64.5], [26.3, 46.2],
[25.1, 29.0], [25.1, 200.0], [23.7, 200.0], [26.7, 200.0], [35.7, 200.0],
[80.0, 200.0], [67.2, 103.8]]
# Sonar measurement data from real robot runs
#
# Data Set #1:
# [[26.7, 113.1], [26.7, 95.1], [25.7, 77.6], [25.1, 59.8], [24.1, 41.0],
# [23.3, 23.5], [20.9, 74.2], [22.1, 200.0], [22.7, 200.0], [23.3, 200.0],
# [22.0, 172.4], [200.0, 152.0], [48.1, 200.0], [22.8, 182.5], [20.4, 178.3],
# [19.8, 146.5], [18.4, 127.4], [17.3, 109.0], [16.6, 92.1], [14.9, 74.8],
# [14.3, 58.2], [13.9, 42.7], [14.9, 33.0], [32.0, 116.6], [32.1, 98.6],
# [32.6, 80.4], [32.0, 62.6], [29.8, 46.0], [28.1, 27.8], [24.1, 200.0],
# [23.9, 200.0], [28.8, 200.0], [77.2, 200.0], [37.2, 200.0], [47.3, 96.4]]
# Robot starting point: x=35.6, y=7.6, orient = pi/2
# Robot ending point (based on actual measurement): x=287, y=235, orient = 0.314
#
# Data Set #2:
# [[27.3, 113.5], [27.8, 95.6], [28.1, 77.8], [29.0, 60.3], [28.1, 42.0],
# [27.7, 23.6], [21.3, 71.0], [22.5, 200.0], [23.5, 200.0], [24.1, 183.5],
# [25.9, 165.7], [200.0, 147.8], [38.3, 200.0], [28.2, 184.3], [27.3, 167.1],
# [24.7, 149.9], [22.9, 130.0], [21.1, 112.3], [19.0, 94.1], [17.5, 76.5],
# [15.7, 59.8], [14.5, 43.9], [13.7, 34.8], [33.0, 117.1], [33.4, 99.3],
# [32.6, 81.4], [29.6, 64.5], [26.3, 46.2], [25.1, 29.0], [25.1, 200.0],
# [23.7, 200.0], [26.7, 200.0], [35.7, 200.0], [80.0, 200.0], [67.2, 103.8]]
# Robot starting point: x=35.6, y=7.6, orient = pi/2
# Robot ending point (based on actual measurement): x=277, y=232, orient = 0.314
# Initialize particle filter
pf = particle_filter(mygrid)
lastError = 0
derivative = 0
for i in range(run_steps):
# Sense
if real_robot_mode:
sonar_l = ev3.sense_left()
sonar_f = ev3.sense_front()
measurements = [sonar_l, sonar_f]
measurement_history.append(measurements)
else:
# Read from measurement history
measurements = measurement_history[i]
sonar_l = measurements[0]
sonar_f = measurements[1]
print ''
print 'Robot - left distance: ', sonar_l
print 'Robot - front distance: ', sonar_f
print 'PF - measurement update...'
pf.measurement_update(measurements, mygrid)
# Move
if sonar_f < (target_wall_dist * 1.5):
if real_robot_mode:
ev3.turn_in_place(-90.0)
motion = ['turn_in_place', -90]
print 'PF - update motion...'
pf.motion_update(motion)
elif sonar_l > (target_wall_dist * 1.5):
if real_robot_mode:
ev3.turn_in_place(45)
motion = ['turn_in_place', 45]
print 'PF - update motion...'
pf.motion_update(motion)
if real_robot_mode:
ev3.move_time(base_power, base_power, 1500)
motion = ['move_time', base_power, base_power, 1500]
print 'PF - update motion...'
pf.motion_update(motion)
else:
error = sonar_l - target_wall_dist
# print 'Robot - PD error: ', error
derivative = error - lastError
delta = Kp * error + Kd * derivative
# print 'Robot - PD delta total: ', delta
# print 'Robot - PD delta P component: ', Kp * error
# print 'Robot - PD delta D component: ', Kd * derivative
power_left = base_power - delta
power_right = base_power + delta
power_left = power_limit(power_left)
power_right = power_limit(power_right)
# print 'Robot - new left power: ', power_left
# print 'Robot - new right power: ', power_right
if real_robot_mode:
ev3.move_time(power_left, power_right, 1000)
motion = ['move_time', power_left, power_right, 1000]
print 'PF - update motion...'
pf.motion_update(motion)
lastError = error
print 'PF - resampling...'
pf.resample()
print 'PF - estimated position: '
print (pf.get_position())
# print measurement history on real run
if real_robot_mode:
print ''
print 'Measurement history: '
print measurement_history
# print final particle set for visualization
#print ''
#print 'PF: final particle set: '
#print pf.p
|
py | 1a364883eb9d3b0db54b3ace7230d76ec61961f5 | #!/usr/bin/env python
###############################################################################
# $Id: gdal2grd.py 18195 2009-12-06 20:24:39Z rouault $
#
# Project: GDAL Python samples
# Purpose: Script to write out ASCII GRD rasters (used in Golden Software
# Surfer)
# from any source supported by GDAL.
# Author: Andrey Kiselev, [email protected]
#
###############################################################################
# Copyright (c) 2003, Andrey Kiselev <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
try:
from osgeo import gdal
from osgeo.gdalconst import *
gdal.TermProgress = gdal.TermProgress_nocb
except ImportError:
import gdal
from gdalconst import *
try:
import numpy as Numeric
Numeric.arrayrange = Numeric.arange
except ImportError:
import Numeric
import sys
# =============================================================================
def Usage():
print('Usage: gdal2grd.py [-b band] [-quiet] infile outfile')
print('Write out ASCII GRD rasters (used in Golden Software Surfer)')
print('')
print(' -b band Select a band number to convert (1 based)')
print(' -quiet Do not report any diagnostic information')
print(' infile Name of the input GDAL supported file')
print(' outfile Name of the output GRD file')
print('')
sys.exit(1)
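# Example invocation (illustrative; the file names are hypothetical):
#   python gdal2grd.py -b 1 input.tif output.grd
# converts band 1 of input.tif into the Surfer ASCII GRD raster output.grd.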
# =============================================================================
infile = None
outfile = None
iBand = 1
quiet = 0
# Parse command line arguments.
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if arg == '-b':
i = i + 1
iBand = int(sys.argv[i])
elif arg == '-quiet':
quiet = 1
elif infile is None:
infile = arg
elif outfile is None:
outfile = arg
else:
Usage()
i = i + 1
if infile is None:
Usage()
if outfile is None:
Usage()
indataset = gdal.Open(infile, GA_ReadOnly)
if indataset is None:
print('Cannot open', infile)
sys.exit(2)
geotransform = indataset.GetGeoTransform()
band = indataset.GetRasterBand(iBand)
if band is None:
print('Cannot load band', iBand, 'from the', infile)
sys.exit(2)
if not quiet:
print('Size is ',indataset.RasterXSize,'x',indataset.RasterYSize,'x',indataset.RasterCount)
print('Projection is ',indataset.GetProjection())
print('Origin = (',geotransform[0], ',',geotransform[3],')')
print('Pixel Size = (',geotransform[1], ',',geotransform[5],')')
print('Converting band number',iBand,'with type',gdal.GetDataTypeName(band.DataType))
# Header printing
fpout = open(outfile, "wt")
fpout.write("DSAA\n")
fpout.write(str(band.XSize) + " " + str(band.YSize) + "\n")
fpout.write(str(geotransform[0] + geotransform[1] / 2) + " " +
str(geotransform[0] + geotransform[1] * (band.XSize - 0.5)) + "\n")
if geotransform[5] < 0:
fpout.write(str(geotransform[3] + geotransform[5] * (band.YSize - 0.5)) + " " +
str(geotransform[3] + geotransform[5] / 2) + "\n")
else:
fpout.write(str(geotransform[3] + geotransform[5] / 2) + " " +
str(geotransform[3] + geotransform[5] * (band.YSize - 0.5)) + "\n")
fpout.write(str(band.ComputeRasterMinMax(0)[0]) + " " +
str(band.ComputeRasterMinMax(0)[1]) + "\n")
for i in range(band.YSize - 1, -1, -1):
scanline = band.ReadAsArray(0, i, band.XSize, 1, band.XSize, 1)
j = 0
while j < band.XSize:
fpout.write(str(scanline[0, j]))
j = j + 1
if j % 10: # Print no more than 10 values per line
fpout.write(" ")
else:
fpout.write("\n")
fpout.write("\n")
# Display progress report on terminal
if not quiet:
gdal.TermProgress(float(band.YSize - i) / band.YSize)
|
py | 1a3648f6188e8df21eeab9f7959f21d2c99b8b87 | """Test cases for AST merge (used for fine-grained incremental checking)"""
import os
import shutil
from typing import List, Tuple, Dict, Optional
from mypy import build
from mypy.build import BuildResult
from mypy.modulefinder import BuildSource
from mypy.defaults import PYTHON3_VERSION
from mypy.errors import CompileError
from mypy.nodes import (
Node, MypyFile, SymbolTable, SymbolTableNode, TypeInfo, Expression, Var, TypeVarExpr,
UNBOUND_IMPORTED
)
from mypy.options import Options
from mypy.server.subexpr import get_subexpressions
from mypy.server.update import FineGrainedBuildManager
from mypy.strconv import StrConv
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal, normalize_error_messages
from mypy.types import TypeStrVisitor, Type
from mypy.util import short_type, IdMapper
# Which data structures to dump in a test case?
SYMTABLE = 'SYMTABLE'
TYPEINFO = ' TYPEINFO'
TYPES = 'TYPES'
AST = 'AST'
NOT_DUMPED_MODULES = (
'builtins',
'typing',
'abc',
'contextlib',
'sys',
'mypy_extensions',
'enum',
)
class ASTMergeSuite(DataSuite):
files = ['merge.test']
def setup(self) -> None:
super().setup()
self.str_conv = StrConv(show_ids=True)
assert self.str_conv.id_mapper is not None
self.id_mapper = self.str_conv.id_mapper # type: IdMapper
self.type_str_conv = TypeStrVisitor(self.id_mapper)
def run_case(self, testcase: DataDrivenTestCase) -> None:
name = testcase.name
# We use the test case name to decide which data structures to dump.
# Dumping everything would result in very verbose test cases.
if name.endswith('_symtable'):
kind = SYMTABLE
elif name.endswith('_typeinfo'):
kind = TYPEINFO
elif name.endswith('_types'):
kind = TYPES
else:
kind = AST
main_src = '\n'.join(testcase.input)
result = self.build(main_src)
assert result is not None, 'cases where CompileError occurred should not be run'
result.manager.fscache.flush()
fine_grained_manager = FineGrainedBuildManager(result)
a = []
if result.errors:
a.extend(result.errors)
target_path = os.path.join(test_temp_dir, 'target.py')
shutil.copy(os.path.join(test_temp_dir, 'target.py.next'), target_path)
a.extend(self.dump(fine_grained_manager, kind))
old_subexpr = get_subexpressions(result.manager.modules['target'])
a.append('==>')
new_file, new_types = self.build_increment(fine_grained_manager, 'target', target_path)
a.extend(self.dump(fine_grained_manager, kind))
for expr in old_subexpr:
if isinstance(expr, TypeVarExpr):
# These are merged so we can't perform the check.
continue
# Verify that old AST nodes are removed from the expression type map.
assert expr not in new_types
a = normalize_error_messages(a)
assert_string_arrays_equal(
testcase.output, a,
'Invalid output ({}, line {})'.format(testcase.file,
testcase.line))
def build(self, source: str) -> Optional[BuildResult]:
options = Options()
options.incremental = True
options.fine_grained_incremental = True
options.use_builtins_fixtures = True
options.show_traceback = True
options.python_version = PYTHON3_VERSION
main_path = os.path.join(test_temp_dir, 'main')
with open(main_path, 'w') as f:
f.write(source)
try:
result = build.build(sources=[BuildSource(main_path, None, None)],
options=options,
alt_lib_path=test_temp_dir)
except CompileError:
# TODO: Is it okay to return None?
return None
return result
def build_increment(self, manager: FineGrainedBuildManager,
module_id: str, path: str) -> Tuple[MypyFile,
Dict[Expression, Type]]:
manager.update([(module_id, path)], [])
module = manager.manager.modules[module_id]
type_map = manager.graph[module_id].type_map()
return module, type_map
def dump(self,
manager: FineGrainedBuildManager,
kind: str) -> List[str]:
modules = manager.manager.modules
if kind == AST:
return self.dump_asts(modules)
elif kind == TYPEINFO:
return self.dump_typeinfos(modules)
elif kind == SYMTABLE:
return self.dump_symbol_tables(modules)
elif kind == TYPES:
return self.dump_types(manager)
assert False, 'Invalid kind %s' % kind
def dump_asts(self, modules: Dict[str, MypyFile]) -> List[str]:
a = []
for m in sorted(modules):
if m in NOT_DUMPED_MODULES:
# We don't support incremental checking of changes to builtins, etc.
continue
s = modules[m].accept(self.str_conv)
a.extend(s.splitlines())
return a
def dump_symbol_tables(self, modules: Dict[str, MypyFile]) -> List[str]:
a = []
for id in sorted(modules):
if not is_dumped_module(id):
# We don't support incremental checking of changes to builtins, etc.
continue
a.extend(self.dump_symbol_table(id, modules[id].names))
return a
def dump_symbol_table(self, module_id: str, symtable: SymbolTable) -> List[str]:
a = ['{}:'.format(module_id)]
for name in sorted(symtable):
if name.startswith('__'):
continue
a.append(' {}: {}'.format(name, self.format_symbol_table_node(symtable[name])))
return a
def format_symbol_table_node(self, node: SymbolTableNode) -> str:
if node.node is None:
if node.kind == UNBOUND_IMPORTED:
return 'UNBOUND_IMPORTED'
return 'None'
if isinstance(node.node, Node):
s = '{}<{}>'.format(str(type(node.node).__name__),
self.id_mapper.id(node.node))
else:
s = '? ({})'.format(type(node.node))
if (isinstance(node.node, Var) and node.node.type and
not node.node.fullname().startswith('typing.')):
typestr = self.format_type(node.node.type)
s += '({})'.format(typestr)
return s
def dump_typeinfos(self, modules: Dict[str, MypyFile]) -> List[str]:
a = []
for id in sorted(modules):
if not is_dumped_module(id):
continue
a.extend(self.dump_typeinfos_recursive(modules[id].names))
return a
def dump_typeinfos_recursive(self, names: SymbolTable) -> List[str]:
a = []
for name, node in sorted(names.items(), key=lambda x: x[0]):
if isinstance(node.node, TypeInfo):
a.extend(self.dump_typeinfo(node.node))
a.extend(self.dump_typeinfos_recursive(node.node.names))
return a
def dump_typeinfo(self, info: TypeInfo) -> List[str]:
if info.fullname() == 'enum.Enum':
# Avoid noise
return []
s = info.dump(str_conv=self.str_conv,
type_str_conv=self.type_str_conv)
return s.splitlines()
def dump_types(self, manager: FineGrainedBuildManager) -> List[str]:
a = []
# To make the results repeatable, we try to generate unique and
# deterministic sort keys.
for module_id in sorted(manager.manager.modules):
if not is_dumped_module(module_id):
continue
type_map = manager.graph[module_id].type_map()
if type_map:
a.append('## {}'.format(module_id))
for expr in sorted(type_map, key=lambda n: (n.line, short_type(n),
str(n) + str(type_map[n]))):
typ = type_map[expr]
a.append('{}:{}: {}'.format(short_type(expr),
expr.line,
self.format_type(typ)))
return a
def format_type(self, typ: Type) -> str:
return typ.accept(self.type_str_conv)
def is_dumped_module(id: str) -> bool:
return id not in NOT_DUMPED_MODULES and (not id.startswith('_') or id == '__main__')
|
py | 1a36490bcdee62f966735eac30a1704e0c996a45 | from ramda.private.asserts import *
from ramda.partial import partial
def multiply2(a, b):
return a * b
def greet(salutation, title, firstName, lastName):
return salutation + ", " + title + " " + firstName + " " + lastName + "!"
def partial_test():
double = partial(multiply2, [2])
assert_equal(double(2), 4)
say_hello = partial(greet, ["Hello"])
say_hello_to_ms = partial(say_hello, ["Ms."])
assert_equal(say_hello_to_ms("Jane", "Jones"), "Hello, Ms. Jane Jones!")
|
py | 1a3649504aecf7ca4f5d1cfb62fd4122f5d2c2e6 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import shade
from ospurge.resources import cinder
from ospurge.tests import mock
class TestBackups(unittest.TestCase):
def setUp(self):
self.cloud = mock.Mock(spec_set=shade.openstackcloud.OpenStackCloud)
self.creds_manager = mock.Mock(cloud=self.cloud)
def test_list(self):
self.assertIs(self.cloud.list_volume_backups.return_value,
cinder.Backups(self.creds_manager).list())
self.cloud.list_volume_backups.assert_called_once_with()
def test_delete(self):
backup = mock.MagicMock()
self.assertIsNone(cinder.Backups(self.creds_manager).delete(backup))
self.cloud.delete_volume_backup.assert_called_once_with(backup['id'])
def test_to_string(self):
backup = mock.MagicMock()
self.assertIn("Volume Backup",
cinder.Backups(self.creds_manager).to_str(backup))
class TestSnapshots(unittest.TestCase):
def setUp(self):
self.cloud = mock.Mock(spec_set=shade.openstackcloud.OpenStackCloud)
self.creds_manager = mock.Mock(cloud=self.cloud)
def test_list(self):
self.assertIs(self.cloud.list_volume_snapshots.return_value,
cinder.Snapshots(self.creds_manager).list())
self.cloud.list_volume_snapshots.assert_called_once_with()
def test_delete(self):
snapshot = mock.MagicMock()
self.assertIsNone(
cinder.Snapshots(self.creds_manager).delete(snapshot))
self.cloud.delete_volume_snapshot.assert_called_once_with(
snapshot['id'])
def test_to_string(self):
snapshot = mock.MagicMock()
self.assertIn("Volume Snapshot ",
cinder.Snapshots(self.creds_manager).to_str(snapshot))
class TestVolumes(unittest.TestCase):
def setUp(self):
self.cloud = mock.Mock(spec_set=shade.openstackcloud.OpenStackCloud)
self.creds_manager = mock.Mock(cloud=self.cloud, project_id=42)
def test_check_prerequisite(self):
self.cloud.list_volume_snapshots.return_value = []
self.assertEqual(
False,
cinder.Volumes(self.creds_manager).check_prerequisite()
)
self.cloud.list_volume_snapshots.assert_called_once_with()
self.cloud.list_servers.assert_called_once_with()
def test_list(self):
self.assertIs(self.cloud.list_volumes.return_value,
cinder.Volumes(self.creds_manager).list())
self.cloud.list_volumes.assert_called_once_with()
def test_should_delete(self):
self.assertEqual(
False,
cinder.Volumes(self.creds_manager).should_delete(
{'os-vol-tenant-attr:tenant_id': 84})
)
self.assertEqual(
True,
cinder.Volumes(self.creds_manager).should_delete(
{'os-vol-tenant-attr:tenant_id': 42})
)
def test_delete(self):
volume = mock.MagicMock()
self.assertIsNone(cinder.Volumes(self.creds_manager).delete(volume))
self.cloud.delete_volume.assert_called_once_with(volume['id'])
def test_to_string(self):
volume = mock.MagicMock()
self.assertIn("Volume ",
cinder.Volumes(self.creds_manager).to_str(volume))
|
py | 1a364aaa2ef822dbfbbfe989d22d9eed6cf338cf | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for versioning.py functions.
"""
from datetime import datetime, timedelta
from operator import itemgetter
import pytest
from botocore.exceptions import ClientError
from botocore.stub import ANY
import versioning
@pytest.mark.parametrize("fail_func,error_code,stop_on_error", [
(None, None, False),
('stub_create_bucket', 'BucketAlreadyOwnedByYou', False),
('stub_create_bucket', 'TestException', True),
('stub_put_bucket_versioning', 'TestException', True),
('stub_put_bucket_lifecycle_configuration', 'TestException', False)
])
def test_create_versioned_bucket(
make_stubber, make_unique_name, stub_controller,
fail_func, error_code, stop_on_error):
s3_stubber = make_stubber(versioning.s3.meta.client)
bucket_name = make_unique_name('bucket')
obj_prefix = 'test-prefix'
stub_controller.add(
s3_stubber.stub_create_bucket,
(bucket_name, versioning.s3.meta.client.meta.region_name))
stub_controller.add(s3_stubber.stub_put_bucket_versioning, (bucket_name, 'Enabled'))
stub_controller.add(s3_stubber.stub_put_bucket_lifecycle_configuration, (
bucket_name, [{
'Status': 'Enabled',
'Prefix': obj_prefix,
'NoncurrentVersionExpiration': {'NoncurrentDays': ANY}
}],))
stub_controller.run(fail_func, error_code, stop_on_error)
if error_code and stop_on_error:
with pytest.raises(ClientError) as exc_info:
versioning.create_versioned_bucket(bucket_name, obj_prefix)
assert exc_info.value.response['Error']['Code'] == error_code
else:
bucket = versioning.create_versioned_bucket(bucket_name, obj_prefix)
assert bucket.name == bucket_name
@pytest.mark.parametrize("rollback_version", ["version-2", "non-existent-version"])
def test_rollback_object(
make_stubber, make_unique_name, stub_controller, rollback_version):
s3_stubber = make_stubber(versioning.s3.meta.client)
bucket_name = make_unique_name('bucket')
obj_key = make_unique_name('object')
versions = [
s3_stubber.make_version(
obj_key, f'version-{index}', True,
datetime.now() + timedelta(minutes=index))
for index in range(5)]
delete_markers = [
s3_stubber.make_version(
obj_key, f'version-{index}', True,
datetime.now() + timedelta(minutes=index))
for index in range(10, 15)]
sorted_versions = \
sorted(versions + delete_markers, key=itemgetter('LastModified'), reverse=True)
stub_controller.add(
s3_stubber.stub_list_object_versions, (bucket_name,),
kwargs={'prefix': obj_key, 'versions': versions,
'delete_markers': delete_markers})
if rollback_version in [ver['VersionId'] for ver in sorted_versions]:
for version in sorted_versions:
if version['VersionId'] != rollback_version:
stub_controller.add(
s3_stubber.stub_delete_object, (bucket_name, obj_key),
{'obj_version_id': version['VersionId']})
else:
break
stub_controller.add(
s3_stubber.stub_head_object, (bucket_name, obj_key))
stub_controller.run()
if rollback_version == 'non-existent-version':
with pytest.raises(KeyError):
versioning.rollback_object(
versioning.s3.Bucket(bucket_name), obj_key, rollback_version)
else:
versioning.rollback_object(
versioning.s3.Bucket(bucket_name), obj_key, rollback_version)
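# Illustrative sketch (not taken from the module under test): the stubs above
# assume rollback_object walks the versions newest-first and deletes everything
# more recent than the requested version, roughly like this (boto3 resource
# API; the helper below is an assumption, not the real implementation):
#
#   def rollback_object(bucket, object_key, version_id):
#       versions = sorted(
#           bucket.object_versions.filter(Prefix=object_key),
#           key=lambda v: v.last_modified, reverse=True)
#       for version in versions:
#           if version.version_id == version_id:
#               break            # reached the rollback target
#           version.delete()     # one stub_delete_object per newer version
#
# When the requested version id does not exist, rollback_object is expected to
# raise KeyError instead, which is what the 'non-existent-version' branch of
# this test checks.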
@pytest.mark.parametrize(
'code_path', ['happy', 'not_latest', 'no_deletes', 'no_versions'])
def test_revive_object(make_stubber, make_unique_name, stub_controller, code_path):
s3_stubber = make_stubber(versioning.s3.meta.client)
bucket_name = make_unique_name('bucket')
obj_key = make_unique_name('object')
if code_path == 'not_latest':
stub_controller.add(
s3_stubber.stub_list_object_versions,
(bucket_name, obj_key),
{'delete_markers':
[s3_stubber.make_version(obj_key, 'version1', False, datetime.now())],
'max_keys': 1})
elif code_path == 'no_deletes':
stub_controller.add(
s3_stubber.stub_list_object_versions, (bucket_name, obj_key),
{'versions':
[s3_stubber.make_version(obj_key, 'version1', True, datetime.now())],
'max_keys': 1})
elif code_path == 'no_versions':
stub_controller.add(
s3_stubber.stub_list_object_versions, (bucket_name, obj_key),
{'max_keys': 1})
elif code_path == 'happy':
stub_controller.add(
s3_stubber.stub_list_object_versions,
(bucket_name, obj_key),
{'delete_markers':
[s3_stubber.make_version(obj_key, 'version1', True, datetime.now())],
'max_keys': 1})
stub_controller.add(
s3_stubber.stub_delete_object, (bucket_name, obj_key),
{'obj_version_id': 'version1'})
stub_controller.add(s3_stubber.stub_head_object, (bucket_name, obj_key))
stub_controller.add(
s3_stubber.stub_get_object, (bucket_name, obj_key),
{'object_data': b'Test data', 'version_id': 'version1'})
stub_controller.run()
versioning.revive_object(versioning.s3.Bucket(bucket_name), obj_key)
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_permanently_delete_object(make_stubber, make_unique_name, error_code):
s3_stubber = make_stubber(versioning.s3.meta.client)
bucket_name = make_unique_name('bucket')
obj_key = make_unique_name('object')
s3_stubber.stub_list_object_versions(
bucket_name, obj_key, delete_markers=
[s3_stubber.make_version(obj_key, 'version1', True, datetime.now())])
s3_stubber.stub_delete_object_versions(bucket_name,
[s3_stubber.make_version(obj_key, 'version1')], error_code=error_code)
if not error_code:
versioning.permanently_delete_object(versioning.s3.Bucket(bucket_name), obj_key)
else:
with pytest.raises(ClientError) as exc_info:
versioning.permanently_delete_object(versioning.s3.Bucket(bucket_name),
obj_key)
assert exc_info.value.response['Error']['Code'] == error_code
|
py | 1a364af13b559c0a8cdd2532699041bf4b081a96 | import unittest
from io import BytesIO
from eth.util.netstring import (header, encode, FileEncoder,
decode_file, Decoder)
class TestNetstring(unittest.TestCase):
def setUp(self):
self.test_data = b"Netstring module by Will McGugan"
self.encoded_data = b"9:Netstring,6:module,2:by,4:Will,7:McGugan,"
def test_header(self):
tests = [ (b"netstring", b"9:"),
(b"Will McGugan", b"12:"),
(b"", b"0:") ]
for test, result in tests:
self.assertEqual(header(test), result)
def test_encode(self):
tests = [ (b"netstring", b"9:netstring,"),
(b"Will McGugan", b"12:Will McGugan,"),
(b"", b"0:,") ]
for test, result in tests:
self.assertEqual(encode(test), result)
def test_file_encoder(self):
file_out = BytesIO()
data = self.test_data.split()
encoder = FileEncoder(file_out)
for s in data:
encoder.write(s)
encoded_data = file_out.getvalue()
self.assertEqual(encoded_data, self.encoded_data)
def test_decode_file(self):
data = self.test_data.split()
for buffer_size in range(1, len(self.encoded_data)):
file_in = BytesIO(self.encoded_data[:])
decoded_data = list(decode_file(file_in, buffer_size=buffer_size))
self.assertEqual(decoded_data, data)
def test_decoder(self):
encoded_data = self.encoded_data
for step in range(1, len(encoded_data)):
i = 0
chunks = []
while i < len(encoded_data):
chunks.append(encoded_data[i:i+step])
i += step
decoder = Decoder()
decoded_data = []
for chunk in chunks:
for s in decoder.feed(chunk):
decoded_data.append(s)
self.assertEqual(decoded_data, self.test_data.split()) |
py | 1a364b8ee240d847ad80c2a8e5ded28e6b8b64a7 | from random import random
from threading import local
import conf
from options.registry.stats import STATS_DEFAULT_PREFIX
class BaseStatsBackend(local):
def __init__(self, prefix=None): # pylint:disable=super-init-not-called
if prefix is None:
prefix = conf.get(STATS_DEFAULT_PREFIX)
self.prefix = prefix
def _get_key(self, key):
if self.prefix:
return '{}.{}'.format(self.prefix, key)
return key
def _should_sample(self, sample_rate):
return sample_rate >= 1 or random() >= 1 - sample_rate
def _incr(self, key, amount=1, sample_rate=1, **kwargs):
raise NotImplementedError
def incr(self, key, amount=1, sample_rate=1, **kwargs):
self._incr(key=self._get_key(key), amount=amount, sample_rate=sample_rate, **kwargs)
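# Example (an assumption, not part of this module): a concrete backend only
# needs to implement _incr; key prefixing and sampling stay in the base class.
#
#   class LoggingStatsBackend(BaseStatsBackend):
#       def _incr(self, key, amount=1, sample_rate=1, **kwargs):
#           if self._should_sample(sample_rate):
#               print('{} += {}'.format(key, amount))
#
#   LoggingStatsBackend(prefix='api').incr('requests')  # prints "api.requests += 1"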
|
py | 1a364c00e068bb1b313d97367e0376f3eb50f18d | from __future__ import unicode_literals
from django import forms, VERSION as DJANGO_VERSION
from django.forms.utils import flatatt
from django.contrib.auth.forms import (
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
PasswordResetForm as OldPasswordResetForm,
UserChangeForm as DjangoUserChangeForm,
AuthenticationForm as DjangoAuthenticationForm,
)
from django.contrib.auth import get_user_model, password_validation
from django.contrib.auth.hashers import identify_hasher, UNUSABLE_PASSWORD_PREFIX
from django.utils.translation import gettext_lazy as _, ugettext
from django.utils.html import format_html
User = get_user_model()
def is_password_usable(pw):
"""Decide whether a password is usable only by the unusable password prefix.
We can't use django.contrib.auth.hashers.is_password_usable either, because
it not only checks against the unusable password, but checks for a valid
hasher too. We need different error messages in those cases.
"""
return not pw.startswith(UNUSABLE_PASSWORD_PREFIX)
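# Rough illustration (the hash strings below are made up): only the unusable
# marker prefix is inspected here, not whether the hash itself is valid.
#
#   is_password_usable('!x7Qp...')                  # -> False, unusable password
#   is_password_usable('pbkdf2_sha256$not$a$hash')  # -> True, even though the
#                                                   #    hash cannot be parsed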
class BetterReadOnlyPasswordHashWidget(ReadOnlyPasswordHashWidget):
"""
A ReadOnlyPasswordHashWidget that has a less intimidating output.
"""
def render(self, name, value, attrs=None, renderer=None):
final_attrs = flatatt(self.build_attrs(attrs or {}))
if not value or not is_password_usable(value):
summary = ugettext("No password set.")
else:
try:
identify_hasher(value)
except ValueError:
summary = ugettext("Invalid password format or unknown"
" hashing algorithm.")
else:
summary = ugettext('*************')
return format_html('<div{attrs}><strong>{summary}</strong></div>',
attrs=final_attrs, summary=summary)
class UserChangeForm(DjangoUserChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
password = self.fields.get('password')
if password:
password.widget = BetterReadOnlyPasswordHashWidget()
class UserCreationForm(forms.ModelForm):
"""
A form for creating new users. Includes all the required
fields, plus a repeated password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
'duplicate_username': _("A user with that %(username)s already exists."),
}
password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above,"
" for verification."))
class Meta:
model = User
fields = (User.USERNAME_FIELD,) + tuple(User.REQUIRED_FIELDS)
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
def validate_uniqueness_of_username_field(value):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
try:
User._default_manager.get_by_natural_key(value)
except User.DoesNotExist:
return value
raise forms.ValidationError(self.error_messages['duplicate_username'] % {
'username': User.USERNAME_FIELD,
})
self.fields[User.USERNAME_FIELD].validators.append(validate_uniqueness_of_username_field)
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(self.error_messages['password_mismatch'])
return password2
def _post_clean(self):
super(UserCreationForm, self)._post_clean()
# Validate the password after self.instance is updated with form data
# by super().
password = self.cleaned_data.get('password2')
if password:
try:
password_validation.validate_password(password, self.instance)
except forms.ValidationError as error:
self.add_error('password2', error)
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class CaseInsensitiveUsernameFieldCreationForm(UserCreationForm):
"""
This form is the same as UserCreationForm, except that usernames are lowercased before they
are saved. This is to disallow the existence of email address usernames which differ only in
case.
"""
def clean_USERNAME_FIELD(self):
username = self.cleaned_data.get(User.USERNAME_FIELD)
if username:
username = username.lower()
return username
# set the correct clean method on the class so that child classes can override and call super()
setattr(
CaseInsensitiveUsernameFieldCreationForm,
'clean_' + User.USERNAME_FIELD,
CaseInsensitiveUsernameFieldCreationForm.clean_USERNAME_FIELD
)
# alias for the old name for backwards-compatability
CaseInsensitiveEmailUserCreationForm = CaseInsensitiveUsernameFieldCreationForm
class FriendlyPasswordResetForm(OldPasswordResetForm):
error_messages = dict(getattr(OldPasswordResetForm, 'error_messages', {}))
error_messages['unknown'] = _("This email address doesn't have an "
"associated user account. Are you "
"sure you've registered?")
def clean_email(self):
"""Return an error message if the email address being reset is unknown.
This is to revert https://code.djangoproject.com/ticket/19758
The bug #19758 tries not to leak emails through password reset because
only usernames are unique in Django's default user model.
django-authtools leaks email addresses through the registration form.
In the case of django-authtools not warning the user doesn't add any
security, and worsen user experience.
"""
email = self.cleaned_data['email']
results = list(self.get_users(email))
if not results:
raise forms.ValidationError(self.error_messages['unknown'])
return email
class AuthenticationForm(DjangoAuthenticationForm):
def __init__(self, request=None, *args, **kwargs):
super(AuthenticationForm, self).__init__(request, *args, **kwargs)
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.fields['username'].widget = username_field.formfield().widget
|
py | 1a364c2a5e7b6a32f69f91f4bb7118856fd4e2fb | #!/usr/bin/python
"""Script to create user-config.py.
.. versionchanged:: 7.0
moved to pywikibot.scripts folder
"""
#
# (C) Pywikibot team, 2010-2021
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import re
import sys
from collections import namedtuple
from pathlib import Path
from textwrap import fill
from typing import Optional
from pywikibot.scripts import _import_with_no_user_config
PYTHON_VERSION = sys.version_info[:2]
if PYTHON_VERSION >= (3, 9):
Tuple = tuple
else:
from typing import Tuple
# DISABLED_SECTIONS cannot be copied; variables must be set manually
DISABLED_SECTIONS = {
'USER INTERFACE SETTINGS', # uses sys
'EXTERNAL EDITOR SETTINGS', # uses os
}
OBSOLETE_SECTIONS = {
'ACCOUNT SETTINGS', # already set
}
# Disable user-config usage as we are creating it here
pywikibot = _import_with_no_user_config('pywikibot')
config, __url__ = pywikibot.config, pywikibot.__url__
base_dir = pywikibot.config.base_dir
try:
console_encoding = sys.stdout.encoding
# unittests fails with "StringIO instance has no attribute 'encoding'"
except AttributeError:
console_encoding = None
if console_encoding is None or sys.platform == 'cygwin':
console_encoding = 'iso-8859-1'
USER_BASENAME = 'user-config.py'
PASS_BASENAME = 'user-password.py'
def change_base_dir():
"""Create a new user directory."""
while True:
new_base = pywikibot.input('New user directory? ')
new_base = os.path.abspath(new_base)
if os.path.exists(new_base):
if os.path.isfile(new_base):
pywikibot.error('there is an existing file with that name.')
continue
# make sure user can read and write this directory
if not os.access(new_base, os.R_OK | os.W_OK):
pywikibot.error('directory access restricted')
continue
pywikibot.output('Using existing directory')
else:
try:
os.mkdir(new_base, pywikibot.config.private_files_permission)
except Exception as e:
pywikibot.error('directory creation failed: {}'.format(e))
continue
pywikibot.output('Created new directory.')
break
if new_base == pywikibot.config.get_base_dir(new_base):
# config would find that file
return new_base
msg = fill("""WARNING: Your user files will be created in the directory
'{new_base}' you have chosen. To access these files, you will either have
to use the argument "-dir:{new_base}" every time you run the bot, or set
the environment variable "PYWIKIBOT_DIR" equal to this directory name in
your operating system. See your operating system documentation for how to
set environment variables.""".format(new_base=new_base), width=76)
pywikibot.output(msg)
if pywikibot.input_yn('Is this OK?', default=False, automatic_quit=False):
return new_base
pywikibot.output('Aborting changes.')
return False
def file_exists(filename):
"""Return whether the file exists and print a message if it exists."""
if os.path.exists(filename):
pywikibot.output('{1} already exists in the target directory "{0}".'
.format(*os.path.split(filename)))
return True
return False
def get_site_and_lang(default_family: Optional[str] = 'wikipedia',
default_lang: Optional[str] = 'en',
default_username: Optional[str] = None, force=False):
"""
Ask the user for the family, site code and username.
:param default_family: The default family which should be chosen.
:param default_lang: The default site code which should be chosen,
if the family supports it.
:param default_username: The default username which should be chosen.
:return: The family, site code and username
:rtype: tuple of three str
"""
known_families = sorted(pywikibot.config.family_files.keys())
if default_family not in known_families:
default_family = None
fam = pywikibot.bot.input_list_choice(
'Select family of sites we are working on, '
'just enter the number or name',
known_families,
force=force,
default=default_family)
fam = pywikibot.family.Family.load(fam)
if hasattr(fam, 'langs'):
if hasattr(fam, 'languages_by_size'):
by_size = [code for code in fam.languages_by_size
if code in fam.langs.keys()]
else:
by_size = []
known_langs = by_size + sorted(
set(fam.langs.keys()).difference(by_size))
else:
known_langs = []
if not known_langs:
pywikibot.output('There were no known site codes found in {}.'
.format(fam.name))
default_lang = None
elif len(known_langs) == 1:
pywikibot.output('The only known site code: {}'.format(known_langs[0]))
default_lang = known_langs[0]
else:
        pywikibot.output('This is the list of known site codes:')
pywikibot.output(', '.join(known_langs))
if default_lang not in known_langs:
if default_lang != 'en' and 'en' in known_langs:
default_lang = 'en'
else:
default_lang = None
message = "The site code of the site we're working on"
mycode = None
while not mycode:
mycode = pywikibot.input(message, default=default_lang, force=force)
if known_langs and mycode and mycode not in known_langs:
if not pywikibot.input_yn(
fill('The site code {!r} is not in the list of known '
'sites. Do you want to continue?'.format(mycode)),
default=False, automatic_quit=False):
mycode = None
message = 'Username on {}:{}'.format(mycode, fam.name)
username = pywikibot.input(message, default=default_username, force=force)
# Escape ''s
if username:
username = username.replace("'", "\\'")
return fam.name, mycode, username
EXTENDED_CONFIG = """\
# This is an automatically generated file. You can find more
# configuration parameters in 'config.py' file or refer
# https://doc.wikimedia.org/pywikibot/master/api_ref/pywikibot.config.html
# The family of sites to be working on.
# Pywikibot will import families/xxx_family.py so if you want to change
# this variable, you have to ensure that such a file exists. You may use
# generate_family_file to create one.
family = '{main_family}'
# The site code (language) of the site to be working on.
mylang = '{main_code}'
# The dictionary usernames should contain a username for each site where you
# have a bot account. If you have a unique username for all sites of a
# family , you can use '*'
{usernames}
# The list of BotPasswords is saved in another file. Import it if needed.
# See https://www.mediawiki.org/wiki/Manual:Pywikibot/BotPasswords to know how
# use them.
{botpasswords}
{config_text}"""
SMALL_CONFIG = """\
family = '{main_family}'
mylang = '{main_code}'
{usernames}
{botpasswords}
"""
PASSFILE_CONFIG = """\
# This is an automatically generated file used to store
# BotPasswords.
#
# As a simpler (but less secure) alternative to OAuth, MediaWiki allows bot
# users to use BotPasswords to limit the permissions given to a bot.
# When using BotPasswords, each instance gets keys. This combination can only
# access the API, not the normal web interface.
#
# See https://www.mediawiki.org/wiki/Manual:Pywikibot/BotPasswords for more
# information.
{botpasswords}"""
def parse_sections() -> list:
"""Parse sections from config.py file.
config.py will be in the pywikibot/ directory whereas
generate_user_files script is in pywikibot/scripts.
:return: a list of ConfigSection named tuples.
"""
data = []
ConfigSection = namedtuple('ConfigSection', 'head, info, section')
config_path = Path(__file__).resolve().parents[1].joinpath('config.py')
if PYTHON_VERSION < (3, 6):
config_path = str(config_path)
with codecs.open(config_path, 'r', 'utf-8') as config_f:
config_file = config_f.read()
result = re.findall(
'^(?P<section># #{5,} (?P<head>[A-Z][A-Z_ ]+[A-Z]) #{5,}\r?\n'
'(?:^#?\r?\n)?' # There may be an empty or short line after header
'(?P<comment>(?:^# .+?)+)' # first comment is used as help string
'^.*?)' # catch the remaining text
'^(?=# #{5,}|# ={5,})', # until section end marker
config_file, re.MULTILINE | re.DOTALL)
for section, head, comment in result:
info = ' '.join(text.strip('# ') for text in comment.splitlines())
data.append(ConfigSection(head, info, section))
return data
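# Sketch of the config.py layout the regular expression above expects
# (illustrative only, not copied from the real file):
#
#   # ############# EXTERNAL SCRIPT PATH SETTINGS #############
#   # The first comment lines become the section's help string.
#   user_script_paths = []
#   # ############# NEXT SECTION HEADER #############
#
# Each match becomes ConfigSection(head='EXTERNAL SCRIPT PATH SETTINGS',
# info="The first comment lines become the section's help string.",
# section=<full section text>).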
def copy_sections():
"""Take config sections and copy them to user-config.py.
:return: config text of all selected sections.
:rtype: str
"""
result = []
sections = parse_sections()
# copy settings
for section in filter(lambda x: x.head not in (DISABLED_SECTIONS
| OBSOLETE_SECTIONS),
sections):
result.append(section.section)
return ''.join(result)
def create_user_config(main_family, main_code, main_username, force=False):
"""
Create a user-config.py in base_dir.
Create a user-password.py if necessary.
"""
_fnc = os.path.join(base_dir, USER_BASENAME)
_fncpass = os.path.join(base_dir, PASS_BASENAME)
useritem = namedtuple('useritem', 'family, code, name')
userlist = []
if force and not config.verbose_output:
if main_username:
userlist = [useritem(main_family, main_code, main_username)]
else:
while True:
userlist += [useritem(*get_site_and_lang(
main_family, main_code, main_username, force=force))]
if not pywikibot.input_yn('Do you want to add any other projects?',
force=force,
default=False, automatic_quit=False):
break
# For each different username entered, ask if user wants to save a
# BotPassword (username, BotPassword name, BotPassword pass)
    msg = fill('See {}/BotPasswords to know how to get codes. '
               'Please note that passwords are stored as plain text in {} '
               'and anyone with read access to that directory will be able '
               'to read the file.'.format(__url__, _fncpass))
botpasswords = []
userset = {user.name for user in userlist}
for username in userset:
if pywikibot.input_yn('Do you want to add a BotPassword for {}?'
.format(username), force=force, default=False):
if msg:
pywikibot.output(msg)
msg = None
message = 'BotPassword\'s "bot name" for {}'.format(username)
botpasswordname = pywikibot.input(message, force=force)
message = 'BotPassword\'s "password" for "{}" ' \
'(no characters will be shown)' \
.format(botpasswordname)
botpasswordpass = pywikibot.input(message, force=force,
password=True)
if botpasswordname and botpasswordpass:
botpasswords.append((username, botpasswordname,
botpasswordpass))
if not userlist: # Show a sample
usernames = "# usernames['{}']['{}'] = 'MyUsername'".format(
main_family, main_code)
else:
usernames = '\n'.join(
"usernames['{user.family}']['{user.code}'] = '{user.name}'"
.format(user=user) for user in userlist)
# Arbitrarily use the first key as default settings
main_family, main_code = userlist[0].family, userlist[0].code
botpasswords = '\n'.join(
"('{}', BotPassword('{}', {!r}))".format(*botpassword)
for botpassword in botpasswords)
config_text = copy_sections()
if config_text:
config_content = EXTENDED_CONFIG
else:
pywikibot.output('Creating a small variant of user-config.py')
config_content = SMALL_CONFIG
try:
# Finally save user-config.py
with codecs.open(_fnc, 'w', 'utf-8') as f:
f.write(config_content.format(
main_family=main_family,
main_code=main_code,
usernames=usernames,
config_text=config_text,
botpasswords='password_file = ' + ('"{}"'.format(PASS_BASENAME)
if botpasswords
else 'None')))
pywikibot.output("'{}' written.".format(_fnc))
except BaseException:
if os.path.exists(_fnc):
os.remove(_fnc)
raise
save_botpasswords(botpasswords, _fncpass)
def save_botpasswords(botpasswords, _fncpass):
"""Write botpasswords to file."""
if botpasswords:
# Save user-password.py if necessary
# user-config.py is already created at this point
# therefore pywikibot.tools can be imported safely
from pywikibot.tools import file_mode_checker
try:
# First create an empty file with good permissions, before writing
# in it
with codecs.open(_fncpass, 'w', 'utf-8') as f:
f.write('')
file_mode_checker(_fncpass, mode=0o600, quiet=True)
with codecs.open(_fncpass, 'w', 'utf-8') as f:
f.write(PASSFILE_CONFIG.format(botpasswords=botpasswords))
file_mode_checker(_fncpass, mode=0o600)
pywikibot.output("'{0}' written.".format(_fncpass))
except EnvironmentError:
os.remove(_fncpass)
raise
def ask_for_dir_change(force):
"""Ask whether the base directory is has to be changed.
Only give option for directory change if user-config.py or user-password
already exists in the directory. This will repeat if user-config.py also
exists in the requested directory.
:param force: Skip asking for directory change
:type force: bool
:return: whether user file or password file exists already
:rtype: tuple of bool
"""
global base_dir
pywikibot.output('\nYour default user directory is "{}"'.format(base_dir))
while True:
# Show whether file exists
userfile = file_exists(os.path.join(base_dir, USER_BASENAME))
passfile = file_exists(os.path.join(base_dir, PASS_BASENAME))
if force and not config.verbose_output or not (userfile or passfile):
break
if pywikibot.input_yn(
'Would you like to change the directory?',
default=True, automatic_quit=False, force=force):
new_base = change_base_dir()
if new_base:
base_dir = new_base
else:
break
return userfile, passfile
def main(*args: Tuple[str, ...]):
"""
Process command line arguments and generate user-config.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
# set the config family and mylang values to an invalid state so that
# the script can detect that the command line arguments -family & -lang
# or -site were used and handle_args has updated these config values,
# and 'force' mode can be activated below.
config.family, config.mylang = 'wikipedia', None
local_args = pywikibot.handle_args(args)
if local_args:
pywikibot.output('Unknown argument{}: {}'
.format('s' if len(local_args) > 1 else '',
', '.join(local_args)))
return
pywikibot.output('You can abort at any time by pressing ctrl-c')
if config.mylang is not None:
force = True
pywikibot.output('Automatically generating user-config.py')
else:
force = False
# Force default site of en.wikipedia
config.family, config.mylang = 'wikipedia', 'en'
username = config.usernames[config.family].get(config.mylang)
try:
has_userfile, has_passfile = ask_for_dir_change(force)
if not (has_userfile or has_passfile):
create_user_config(config.family, config.mylang, username,
force=force)
except KeyboardInterrupt:
pywikibot.output('\nScript terminated by user.')
# Creation of user-fixes.py has been replaced by an example file.
if __name__ == '__main__':
main()
|
py | 1a364c8f9785e0871f7f2928bc144de04b7c2f52 | """ Get the Bots in any chat*
Syntax: .get_bot"""
from telethon import events
from telethon.tl.types import ChannelParticipantAdmin, ChannelParticipantsBots
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="get_bot ?(.*)"))
async def _(event):
if event.fwd_from:
return
mentions = "**Bots in this Channel**: \n"
input_str = event.pattern_match.group(1)
to_write_chat = await event.get_input_chat()
chat = None
if not input_str:
chat = to_write_chat
else:
mentions = "Bots in {} channel: \n".format(input_str)
try:
chat = await borg.get_entity(input_str)
except Exception as e:
await event.edit(str(e))
return None
try:
async for x in borg.iter_participants(chat, filter=ChannelParticipantsBots):
if isinstance(x.participant, ChannelParticipantAdmin):
mentions += "\n ⚜️ [{}](tg://user?id={}) `{}`".format(x.first_name, x.id, x.id)
else:
mentions += "\n [{}](tg://user?id={}) `{}`".format(x.first_name, x.id, x.id)
except Exception as e:
mentions += " " + str(e) + "\n"
await event.edit(mentions)
|
py | 1a364d7008908edb1a723ad0733b36c7fb828b17 | # proxy module
from __future__ import absolute_import
from tvtk.special_gen import *
|
py | 1a364eb411c7203a99de080728de3b2f5760c115 | # SPDX-FileCopyrightText: Copyright (c) 2011 LG Electronics Inc.
#
# SPDX-License-Identifier: GPL-3.0-only
import os
from fosslight_util.set_log import init_log
def main():
output_dir = "tests"
logger, _result_log = init_log(os.path.join(output_dir, "test_add_log.txt"))
logger.warning("TESTING - add mode")
if __name__ == '__main__':
main()
|
py | 1a364edaa94fd8572f087e419952770a597f44a3 | # coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional, Union, Literal # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator, Field, Extra # noqa: F401
from aries_cloudcontroller.model.dif_options import DIFOptions
from aries_cloudcontroller.model.presentation_definition import PresentationDefinition
class DIFProofRequest(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
DIFProofRequest - a model defined in OpenAPI
presentation_definition: The presentation_definition of this DIFProofRequest.
options: The options of this DIFProofRequest [Optional].
"""
presentation_definition: PresentationDefinition
options: Optional[DIFOptions] = None
def __init__(
self,
*,
presentation_definition: PresentationDefinition = None,
options: Optional[DIFOptions] = None,
**kwargs,
):
super().__init__(
options=options,
presentation_definition=presentation_definition,
**kwargs,
)
class Config:
allow_population_by_field_name = True
DIFProofRequest.update_forward_refs()
|
py | 1a364f47681a0076e86a407e5783c90de189cdff | from myproject.helpers import complex_function, complex_function_with_params
class MyClass:
def __init__(self, name):
self.name = name
def sayhi(self):
return "hi my name is: {}".format(self.name)
def function_a():
return complex_function().upper()
def function_b():
param1 = MyClass("foo")
return param1.sayhi()
def function_c(param):
output = param.sayhi()
return output.upper()
def function_d(param):
name = param.name
return name.upper()
def function_e(param1, param2):
return complex_function_with_params(param1.upper(), param2.upper())
def file_contents_to_uppercase(path_to_file):
with open(path_to_file, "r") as f:
contents = f.read()
return contents.upper()
|
py | 1a364f484a3f3e89cac253a7ec145cfe5e117295 | #!/usr/bin/env python3
import subprocess
import os
import sys
sys.path.append("../")
sys.path.append("../../system/lib/")
sys.path.append("../volume/")
sys.path.append("../array/")
import json_parser
import pos
import cli
import api
import json
import MOUNT_ARRAY_BASIC
SPARE = MOUNT_ARRAY_BASIC.SPARE
ARRAYNAME = MOUNT_ARRAY_BASIC.ARRAYNAME
def check_result():
if api.check_state(ARRAYNAME, "NORMAL") == True:
if api.is_device_in_the_array(ARRAYNAME, SPARE) == False:
return "pass"
return "fail"
def execute():
MOUNT_ARRAY_BASIC.execute()
out = cli.remove_device(SPARE, ARRAYNAME)
return out
if __name__ == "__main__":
if len(sys.argv) >= 2:
pos.set_addr(sys.argv[1])
api.clear_result(__file__)
out = execute()
result = check_result()
ret = api.set_result_manually(out, result, __file__)
pos.flush_and_kill_pos()
exit(ret) |
py | 1a364f7ca793a59dfaa8bc3084069c4632c9fdb8 | test = { 'name': 'q3_6',
'points': 1,
'suites': [ { 'cases': [ {'code': '>>> rate_means.num_rows == 2\nTrue', 'hidden': False, 'locked': False},
{'code': '>>> round(rate_means.where("Death Penalty", False).column(1).item(0), 15) == 8.120454540452272\nTrue', 'hidden': False, 'locked': False},
{'code': '>>> round(rate_means.where("Death Penalty", True).column(1).item(0), 15) == 7.513636380386362\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
py | 1a36501346751846c6d2dfe9935896541858e37d | """Implement models for EFS resources.
See AWS docs for details:
https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html
"""
import json
import time
from copy import deepcopy
from hashlib import md5
from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel
from moto.core.utils import (
camelcase_to_underscores,
get_random_hex,
underscores_to_camelcase,
BackendDict,
)
from moto.ec2 import ec2_backends
from moto.ec2.exceptions import InvalidSubnetIdError
from moto.efs.exceptions import (
BadRequest,
FileSystemAlreadyExists,
FileSystemInUse,
FileSystemNotFound,
MountTargetConflict,
MountTargetNotFound,
PolicyNotFound,
SubnetNotFound,
SecurityGroupNotFound,
SecurityGroupLimitExceeded,
)
def _lookup_az_id(az_name):
"""Find the Availability zone ID given the AZ name."""
ec2 = ec2_backends[az_name[:-1]]
for zone in ec2.describe_availability_zones():
if zone.name == az_name:
return zone.zone_id
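# Example of the naming convention assumed above: the region is obtained by
# dropping the trailing zone letter from the AZ name, e.g.
#
#   _lookup_az_id("us-east-1a")   # searches region "us-east-1" for the zone
#                                 # named "us-east-1a" and returns its zone_id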
class FileSystem(CloudFormationModel):
"""A model for an EFS File System Volume."""
def __init__(
self,
region_name,
creation_token,
file_system_id,
performance_mode="generalPurpose",
encrypted=False,
kms_key_id=None,
throughput_mode="bursting",
provisioned_throughput_in_mibps=None,
availability_zone_name=None,
backup=False,
lifecycle_policies=None,
file_system_policy=None,
tags=None,
):
if availability_zone_name:
backup = True
if kms_key_id and not encrypted:
raise BadRequest('If kms_key_id given, "encrypted" must be True.')
# Save given parameters
self.creation_token = creation_token
self.performance_mode = performance_mode
self.encrypted = encrypted
self.kms_key_id = kms_key_id
self.throughput_mode = throughput_mode
self.provisioned_throughput_in_mibps = provisioned_throughput_in_mibps
self.availability_zone_name = availability_zone_name
self.availability_zone_id = None
if self.availability_zone_name:
self.availability_zone_id = _lookup_az_id(self.availability_zone_name)
self._backup = backup
self.lifecycle_policies = lifecycle_policies
self.file_system_policy = file_system_policy
# Validate tag structure.
if tags is None:
self.tags = []
else:
if (
not isinstance(tags, list)
or not all(isinstance(tag, dict) for tag in tags)
or not all(set(tag.keys()) == {"Key", "Value"} for tag in tags)
):
raise ValueError("Invalid tags: {}".format(tags))
else:
self.tags = tags
# Generate AWS-assigned parameters
self.file_system_id = file_system_id
self.file_system_arn = "arn:aws:elasticfilesystem:{region}:{user_id}:file-system/{file_system_id}".format(
region=region_name, user_id=ACCOUNT_ID, file_system_id=self.file_system_id
)
self.creation_time = time.time()
self.owner_id = ACCOUNT_ID
# Initialize some state parameters
self.life_cycle_state = "available"
self._mount_targets = {}
self._size_value = 0
@property
def size_in_bytes(self):
return {
"Value": self._size_value,
"ValueInIA": 0,
"ValueInStandard": self._size_value,
"Timestamp": time.time(),
}
@property
def physical_resource_id(self):
return self.file_system_id
@property
def number_of_mount_targets(self):
return len(self._mount_targets)
@property
def backup_policy(self):
if self._backup:
return {"Status": "ENABLED"}
else:
return
def info_json(self):
ret = {
underscores_to_camelcase(k.capitalize()): v
for k, v in self.__dict__.items()
if not k.startswith("_")
}
ret["SizeInBytes"] = self.size_in_bytes
ret["NumberOfMountTargets"] = self.number_of_mount_targets
return ret
def add_mount_target(self, subnet, mount_target):
# Check that the mount target doesn't violate constraints.
for other_mount_target in self._mount_targets.values():
if other_mount_target.subnet_vpc_id != subnet.vpc_id:
raise MountTargetConflict(
"requested subnet for new mount target is not in the same VPC as existing mount targets"
)
if subnet.availability_zone in self._mount_targets:
raise MountTargetConflict("mount target already exists in this AZ")
self._mount_targets[subnet.availability_zone] = mount_target
def has_mount_target(self, subnet):
return subnet.availability_zone in self._mount_targets
def iter_mount_targets(self):
for mt in self._mount_targets.values():
yield mt
def remove_mount_target(self, subnet):
del self._mount_targets[subnet.availability_zone]
@staticmethod
def cloudformation_name_type():
return
@staticmethod
def cloudformation_type():
return "AWS::EFS::FileSystem"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-efs-filesystem.html
props = deepcopy(cloudformation_json["Properties"])
props = {camelcase_to_underscores(k): v for k, v in props.items()}
if "file_system_tags" in props:
props["tags"] = props.pop("file_system_tags")
if "backup_policy" in props:
if "status" not in props["backup_policy"]:
raise ValueError("BackupPolicy must be of type BackupPolicy.")
status = props.pop("backup_policy")["status"]
if status not in ["ENABLED", "DISABLED"]:
raise ValueError('Invalid status: "{}".'.format(status))
props["backup"] = status == "ENABLED"
if "bypass_policy_lockout_safety_check" in props:
raise ValueError(
"BypassPolicyLockoutSafetyCheck not currently "
"supported by AWS Cloudformation."
)
return efs_backends[region_name].create_file_system(resource_name, **props)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
raise NotImplementedError(
"Update of EFS File System via cloudformation is not yet implemented."
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
return efs_backends[region_name].delete_file_system(resource_name)
class MountTarget(CloudFormationModel):
"""A model for an EFS Mount Target."""
def __init__(self, file_system, subnet, ip_address, security_groups):
# Set the simple given parameters.
self.file_system_id = file_system.file_system_id
self._file_system = file_system
self._file_system.add_mount_target(subnet, self)
self.subnet_id = subnet.id
self._subnet = subnet
self.vpc_id = subnet.vpc_id
self.security_groups = security_groups
# Check the number of security groups.
if self.security_groups is not None and len(self.security_groups) > 5:
raise SecurityGroupLimitExceeded(
"The maximum number of security groups per interface has been reached."
)
# Get an IP address if needed, otherwise validate the one we're given.
if ip_address is None:
ip_address = subnet.get_available_subnet_ip(self)
else:
try:
subnet.request_ip(ip_address, self)
except Exception as e:
if "IP" in str(e) and "CIDR" in str(e):
raise BadRequest(
"Address does not fall within the subnet's address range"
)
else:
raise e
self.ip_address = ip_address
# Init non-user-assigned values.
self.owner_id = ACCOUNT_ID
self.mount_target_id = "fsmt-{}".format(get_random_hex())
self.life_cycle_state = "available"
self.network_interface_id = None
self.availability_zone_id = subnet.availability_zone_id
self.availability_zone_name = subnet.availability_zone
def clean_up(self):
self._file_system.remove_mount_target(self._subnet)
self._subnet.del_subnet_ip(self.ip_address)
def set_network_interface(self, network_interface):
self.network_interface_id = network_interface.id
def info_json(self):
ret = {
underscores_to_camelcase(k.capitalize()): v
for k, v in self.__dict__.items()
if not k.startswith("_")
}
return ret
@property
def physical_resource_id(self):
        return self.mount_target_id
@property
def subnet_vpc_id(self):
return self._subnet.vpc_id
@staticmethod
def cloudformation_name_type():
pass
@staticmethod
def cloudformation_type():
return "AWS::EFS::MountTarget"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-efs-mounttarget.html
props = deepcopy(cloudformation_json["Properties"])
props = {camelcase_to_underscores(k): v for k, v in props.items()}
return efs_backends[region_name].create_mount_target(**props)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
raise NotImplementedError(
"Updates of EFS Mount Target via cloudformation are not yet implemented."
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
return efs_backends[region_name].delete_mount_target(resource_name)
class EFSBackend(BaseBackend):
"""The backend manager of EFS resources.
This is the state-machine for each region, tracking the file systems, mount targets,
and eventually access points that are deployed. Creating, updating, and destroying
such resources should always go through this class.
"""
def __init__(self, region_name=None):
super().__init__()
self.region_name = region_name
self.creation_tokens = set()
self.file_systems_by_id = {}
self.mount_targets_by_id = {}
self.next_markers = {}
def reset(self):
# preserve region
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def _mark_description(self, corpus, max_items):
if max_items < len(corpus):
new_corpus = corpus[max_items:]
new_hash = md5(json.dumps(new_corpus).encode("utf-8"))
next_marker = new_hash.hexdigest()
self.next_markers[next_marker] = new_corpus
else:
next_marker = None
return next_marker
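    # Pagination sketch (illustrative figures): with three matching file
    # systems and max_items=2, the first call returns two items plus a marker
    # (the md5 of the JSON-encoded remainder); passing the marker back in a
    # second call yields the rest.
    #
    #   marker, page = backend.describe_file_systems(None, 2, None, None)
    #   _, rest = backend.describe_file_systems(marker, 2, None, None)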
@property
def ec2_backend(self):
return ec2_backends[self.region_name]
def create_file_system(self, creation_token, **params):
"""Create a new EFS File System Volume.
https://docs.aws.amazon.com/efs/latest/ug/API_CreateFileSystem.html
"""
if not creation_token:
raise ValueError("No creation token given.")
if creation_token in self.creation_tokens:
raise FileSystemAlreadyExists(creation_token)
# Create a new file system ID:
def make_id():
return "fs-{}".format(get_random_hex())
fsid = make_id()
while fsid in self.file_systems_by_id:
fsid = make_id()
self.file_systems_by_id[fsid] = FileSystem(
self.region_name,
creation_token,
fsid,
**{k: v for k, v in params.items() if v is not None}
)
self.creation_tokens.add(creation_token)
return self.file_systems_by_id[fsid]
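    # Minimal usage sketch (region and arguments are made up):
    #
    #   backend = efs_backends["us-east-1"]
    #   fs = backend.create_file_system("my-token", encrypted=False)
    #   backend.create_file_system("my-token")   # -> FileSystemAlreadyExists
    #
    # Exact indexing of efs_backends depends on the moto version; this is only
    # meant to show the creation-token bookkeeping performed above.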
def describe_file_systems(self, marker, max_items, creation_token, file_system_id):
"""Describe all the EFS File Systems, or specific File Systems.
https://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystems.html
"""
        # Restrict the possible corpus of results based on inputs.
if creation_token and file_system_id:
raise BadRequest(
"Request cannot contain both a file system ID and a creation token."
)
elif creation_token:
# Handle the creation token case.
corpus = []
for fs in self.file_systems_by_id.values():
if fs.creation_token == creation_token:
corpus.append(fs.info_json())
elif file_system_id:
# Handle the case that a file_system_id is given.
if file_system_id not in self.file_systems_by_id:
raise FileSystemNotFound(file_system_id)
corpus = [self.file_systems_by_id[file_system_id]]
elif marker is not None:
# Handle the case that a marker is given.
if marker not in self.next_markers:
raise BadRequest("Invalid Marker")
corpus = self.next_markers[marker]
else:
# Handle the vanilla case.
corpus = [fs.info_json() for fs in self.file_systems_by_id.values()]
# Handle the max_items parameter.
file_systems = corpus[:max_items]
next_marker = self._mark_description(corpus, max_items)
return next_marker, file_systems
def create_mount_target(
self, file_system_id, subnet_id, ip_address=None, security_groups=None
):
"""Create a new EFS Mount Target for a given File System to a given subnet.
Note that you can only create one mount target for each availability zone
(which is implied by the subnet ID).
https://docs.aws.amazon.com/efs/latest/ug/API_CreateMountTarget.html
"""
# Get the relevant existing resources
try:
subnet = self.ec2_backend.get_subnet(subnet_id)
except InvalidSubnetIdError:
raise SubnetNotFound(subnet_id)
if file_system_id not in self.file_systems_by_id:
raise FileSystemNotFound(file_system_id)
file_system = self.file_systems_by_id[file_system_id]
# Validate the security groups.
if security_groups:
sg_lookup = {sg.id for sg in self.ec2_backend.describe_security_groups()}
for sg_id in security_groups:
if sg_id not in sg_lookup:
raise SecurityGroupNotFound(sg_id)
# Create the new mount target
mount_target = MountTarget(file_system, subnet, ip_address, security_groups)
# Establish the network interface.
network_interface = self.ec2_backend.create_network_interface(
subnet, [mount_target.ip_address], group_ids=security_groups
)
mount_target.set_network_interface(network_interface)
# Record the new mount target
self.mount_targets_by_id[mount_target.mount_target_id] = mount_target
return mount_target
def describe_mount_targets(
self, max_items, file_system_id, mount_target_id, access_point_id, marker
):
"""Describe the mount targets given a mount target ID or a file system ID.
Note that as of this writing access points, and thus access point IDs are not
supported.
https://docs.aws.amazon.com/efs/latest/ug/API_DescribeMountTargets.html
"""
# Restrict the possible corpus of results based on inputs.
if not (bool(file_system_id) ^ bool(mount_target_id) ^ bool(access_point_id)):
raise BadRequest("Must specify exactly one mutually exclusive parameter.")
elif file_system_id:
# Handle the case that a file_system_id is given.
if file_system_id not in self.file_systems_by_id:
raise FileSystemNotFound(file_system_id)
corpus = [
mt.info_json()
for mt in self.file_systems_by_id[file_system_id].iter_mount_targets()
]
elif mount_target_id:
if mount_target_id not in self.mount_targets_by_id:
raise MountTargetNotFound(mount_target_id)
# Handle mount target specification case.
corpus = [self.mount_targets_by_id[mount_target_id].info_json()]
else:
# We don't handle access_point_id's yet.
assert False, "Moto does not yet support EFS access points."
# Handle the case that a marker is given. Note that the handling is quite
# different from that in describe_file_systems.
if marker is not None:
if marker not in self.next_markers:
raise BadRequest("Invalid Marker")
corpus_mtids = {m["MountTargetId"] for m in corpus}
marked_mtids = {m["MountTargetId"] for m in self.next_markers[marker]}
mt_ids = corpus_mtids & marked_mtids
corpus = [self.mount_targets_by_id[mt_id].info_json() for mt_id in mt_ids]
# Handle the max_items parameter.
mount_targets = corpus[:max_items]
next_marker = self._mark_description(corpus, max_items)
return next_marker, mount_targets
def delete_file_system(self, file_system_id):
"""Delete the file system specified by the given file_system_id.
Note that mount targets must be deleted first.
https://docs.aws.amazon.com/efs/latest/ug/API_DeleteFileSystem.html
"""
if file_system_id not in self.file_systems_by_id:
raise FileSystemNotFound(file_system_id)
file_system = self.file_systems_by_id[file_system_id]
if file_system.number_of_mount_targets > 0:
raise FileSystemInUse(
"Must delete all mount targets before deleting file system."
)
del self.file_systems_by_id[file_system_id]
self.creation_tokens.remove(file_system.creation_token)
return
def delete_mount_target(self, mount_target_id):
"""Delete a mount target specified by the given mount_target_id.
Note that this will also delete a network interface.
https://docs.aws.amazon.com/efs/latest/ug/API_DeleteMountTarget.html
"""
if mount_target_id not in self.mount_targets_by_id:
raise MountTargetNotFound(mount_target_id)
mount_target = self.mount_targets_by_id[mount_target_id]
self.ec2_backend.delete_network_interface(mount_target.network_interface_id)
del self.mount_targets_by_id[mount_target_id]
mount_target.clean_up()
return
def describe_backup_policy(self, file_system_id):
backup_policy = self.file_systems_by_id[file_system_id].backup_policy
if not backup_policy:
raise PolicyNotFound("None")
return backup_policy
efs_backends = BackendDict(EFSBackend, "efs")
|
py | 1a36501c72deb055c1bf75aa2a0db8b36c82b7b1 | # -*- coding: utf-8 -*-
"""
author: zengbin93
email: [email protected]
create_dt: 2021/12/13 17:39
describe: event performance analysis
"""
import os
import os.path
import traceback
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
from tqdm import tqdm
from typing import Callable, List
from czsc.objects import Factor
from czsc.data.ts_cache import TsDataCache
from czsc.sensors.utils import generate_signals
from czsc.utils import io
from czsc.utils import WordWriter
plt.style.use('ggplot')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
class FactorsSensor:
"""因子(Factor)感应器:分析各种信号和因子的表现"""
def __init__(self,
results_path: str,
sdt: str,
edt: str,
dc: TsDataCache,
base_freq: str,
freqs: List[str],
get_signals: Callable,
get_factors: Callable):
self.name = self.__class__.__name__
self.version = "V20211213"
os.makedirs(results_path, exist_ok=True)
self.results_path = results_path
self.sdt = sdt
self.edt = edt
self.get_signals = get_signals
self.get_factors = get_factors
self.factors: List[Factor] = get_factors()
self.base_freq = base_freq
self.freqs = freqs
self.file_docx = os.path.join(results_path, f'factors_sensor_{sdt}_{edt}.docx')
self.writer = WordWriter(self.file_docx)
self.dc = dc
self.betas = ['000001.SH', '000016.SH', '000905.SH', '000300.SH', '399001.SZ', '399006.SZ']
self.file_sf = os.path.join(results_path, f'factors_{sdt}_{edt}.pkl')
self.signals_path = os.path.join(results_path, 'signals')
os.makedirs(self.signals_path, exist_ok=True)
if os.path.exists(self.file_sf):
self.sf = io.read_pkl(self.file_sf)
else:
self.sf = self.get_stock_factors()
io.save_pkl(self.sf, self.file_sf)
def get_share_factors(self, ts_code: str, name: str):
"""获取单个标的因子信息"""
dc = self.dc
sdt = self.sdt
edt = self.edt
factors = self.factors
start_date = pd.to_datetime(self.sdt) - timedelta(days=3000)
bars = dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=edt, freq='D', asset="E", raw_bar=True)
n_bars = dc.pro_bar(ts_code=ts_code, start_date=sdt, end_date=edt, freq='D', asset="E", raw_bar=False)
nb_dicts = {row['trade_date'].strftime("%Y%m%d"): row for row in n_bars.to_dict("records")}
signals = generate_signals(bars, sdt, self.base_freq, self.freqs, self.get_signals)
results = []
for s in signals:
row = {'name': name, 'ts_code': ts_code}
for factor in factors:
row[factor.name] = factor.is_match(s)
nb_info = nb_dicts.get(s['dt'].strftime("%Y%m%d"), None)
row.update(nb_info)
results.append(row)
df_res = pd.DataFrame(results)
if df_res.empty:
return df_res
df_res = df_res[pd.to_datetime(sdt) <= df_res['trade_date']]
df_res = df_res[df_res['trade_date'] <= pd.to_datetime(edt)]
        # add total market value (total_mv)
df_ = dc.daily_basic(ts_code, sdt, dc.edt)
df_['trade_date'] = pd.to_datetime(df_['trade_date'])
df_res = df_res.merge(df_[['trade_date', 'total_mv']], on='trade_date', how='left')
return signals, df_res
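    # Rough shape of the rows assembled above (illustrative; the boolean
    # columns come from the Factor objects supplied by the caller):
    #
    #   {'name': ..., 'ts_code': '000001.SZ', '<factor.name>': True, ...,
    #    'n1b': ..., 'n2b': ..., 'total_mv': ...}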
def get_stock_factors(self):
"""获取全部股票的因子信息"""
stocks = self.dc.stock_basic()
all_factors = []
for row in tqdm(stocks.to_dict('records'), desc="get_stock_factors"):
ts_code = row['ts_code']
name = row['name']
try:
signals, factors = self.get_share_factors(ts_code, name)
all_factors.append(factors)
file_signals = os.path.join(self.signals_path, f'{ts_code}.pkl')
io.save_pkl(signals, file_signals)
except:
print(f"get_share_factors error: {ts_code}, {name}")
traceback.print_exc()
df_factors = pd.concat(all_factors, ignore_index=True)
return df_factors
def validate_performance(self):
factors = self.factors
sf = self.sf
results = [{
"name": "全市场", "count": len(sf), 'n1b': sf.n1b.mean(), 'n2b': sf.n2b.mean(),
'n3b': sf.n3b.mean(), 'n5b': sf.n5b.mean(), 'n10b': sf.n10b.mean(), 'n20b': sf.n20b.mean()
}]
for factor in factors:
df = sf[sf[factor.name]]
row = {"name": factor.name, "count": len(df)}
row.update(df[['n1b', 'n2b', 'n3b', 'n5b', 'n10b', 'n20b']].mean().to_dict())
results.append(row)
df_nb_info = pd.DataFrame(results)
df_nb_info.to_excel(os.path.join(self.results_path, f"factors_nb_info.xlsx"), index=False)
|
py | 1a3650afaae361e8df3b7c8a3981cc30751a62e9 | #!/usr/bin/env python3
# encoding: utf-8
"""
pyQms
-----
Python module for fast and accurate mass spectrometry data quantification
:license: MIT, see LICENSE.txt for more details
Authors:
* Leufken, J.
* Niehues, A.
* Sarin, L.P.
* Hippler, M.
* Leidel, S.A.
* Fufezan, C.
"""
import pickle
import sys
import os
try:
import pymzml
import pymzml.plot
except:
print("Please install pymzML via: pip install pymzml")
def main(result_pkl=None):
"""
usage:
./plot_match_examples.py <Path2ResultPkl>
Extracts the match information and plots one example isotopologue match into
the 'data' folder. Uses the plot function of pymzML (`pymzML.plot`_). Use
this script as template for annotating spectra with match information.
Note:
Plots only one high scored formula (mScore >0.95) from the result pkl.
Use e.g. with the 'BSA1.mzML_pyQms_results.pkl' obtained from e.g.
example script parse_ident_file_and_quantify_with_carbamidomethylation.py
to get example plotting data.
.. _pymzML.plot:
https://pymzml.github.io/plot.html
"""
results_class = pickle.load(open(result_pkl, "rb"))
for key, i, entry in results_class.extract_results():
if entry.score > 0.95:
p = pymzml.plot.Factory()
label_x = []
measured_peaks = []
matched_peaks = []
for (
measured_mz,
measured_intensity,
relative_i,
calculated_mz,
calculated_intensity,
) in entry.peaks:
if measured_mz is not None:
measured_peaks.append((measured_mz, measured_intensity))
matched_peaks.append(
(calculated_mz, calculated_intensity * entry.scaling_factor)
)
label_x.append(
(
calculated_mz,
"{0:5.3f} ppm".format(
(measured_mz - calculated_mz) / (measured_mz * 1e-6)
),
)
)
mz_only = [n[0] for n in measured_peaks]
mz_range = [min(mz_only) - 1, max(mz_only) + 1]
peptides = results_class.lookup["formula to molecule"][key.formula]
if len(peptides) > 1:
continue
p.newPlot(
header="Formula: {0}; Peptide: {1}; Charge: {2}\n File: {3}; Scan: {4}; RT: {5:1.3f}\n Amount: {6:1.3f}; Score: {7:1.3f}".format(
key.formula,
peptides[0],
key.charge,
key.file_name,
entry.spec_id,
entry.rt,
entry.scaling_factor,
entry.score,
),
mzRange=mz_range,
)
p.add(measured_peaks, color=(0, 0, 0), style="sticks")
p.add(matched_peaks, color=(0, 200, 0), style="triangles")
p.add(label_x, color=(0, 0, 255), style="label_x")
plot_name = os.path.join(
os.pardir,
"data",
"{0}_Peptide_{1}_Charge_{2}.xhtml".format(
key.file_name, peptides[0], key.charge
),
)
p.save(filename=plot_name, mzRange=mz_range)
print("Plotted file {0}".format(plot_name))
break
if __name__ == "__main__":
if len(sys.argv) < 2:
print(main.__doc__)
else:
main(result_pkl=sys.argv[1])
|
py | 1a36513a7504d4dd7225481d3c0c2244b3793bf9 | #!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import shutil
import subprocess
import sys
import os
from charmhelpers.fetch import (
apt_install, filter_installed_packages,
apt_update
)
from charmhelpers.core.hookenv import (
open_port,
close_port,
relation_get,
relation_set,
relation_ids,
config,
Hooks, UnregisteredHookError,
log,
status_set,
WARNING,
DEBUG,
)
from charmhelpers.core.host import (
service_restart,
lsb_release,
mkdir,
init_is_systemd,
)
from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
openstack_upgrade_available,
pausable_restart_on_change as restart_on_change,
is_unit_paused_set,
get_os_codename_install_source,
CompareOpenStackReleases,
)
from charmhelpers.contrib.openstack.ha.utils import (
update_dns_ha_resource_params,
)
from ceilometer_utils import (
disable_package_apache_site,
get_packages,
CEILOMETER_DB,
CEILOMETER_SERVICE,
CEILOMETER_ROLE,
CEILOMETER_API_SYSTEMD_CONF,
register_configs,
restart_map,
run_in_apache,
services,
get_ceilometer_context,
get_shared_secret,
do_openstack_upgrade,
set_shared_secret,
assess_status,
reload_systemd,
ceilometer_upgrade,
)
from ceilometer_contexts import CEILOMETER_PORT
from charmhelpers.contrib.openstack.ip import (
canonical_url,
PUBLIC, INTERNAL, ADMIN
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address,
get_relation_ip,
is_ipv6,
)
from charmhelpers.contrib.hahelpers.cluster import (
get_hacluster_config,
is_clustered,
is_elected_leader
)
from charmhelpers.contrib.peerstorage import (
peer_retrieve,
peer_store,
)
from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.hardening.harden import harden
hooks = Hooks()
CONFIGS = register_configs()
@hooks.hook('install.real')
@harden()
def install():
execd_preinstall()
origin = config('openstack-origin')
if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and origin == 'distro'):
origin = 'cloud:precise-grizzly'
configure_installation_source(origin)
packages = filter_installed_packages(get_packages())
if packages:
status_set('maintenance', 'Installing packages')
apt_update(fatal=True)
apt_install(packages, fatal=True)
if init_is_systemd():
# NOTE(jamespage): ensure systemd override folder exists prior to
# attempting to write override.conf
mkdir(os.path.dirname(CEILOMETER_API_SYSTEMD_CONF))
if run_in_apache():
disable_package_apache_site()
@hooks.hook("amqp-relation-joined")
def amqp_joined():
relation_set(username=config('rabbit-user'),
vhost=config('rabbit-vhost'))
@hooks.hook("shared-db-relation-joined")
def db_joined():
relation_set(ceilometer_database=CEILOMETER_DB)
@hooks.hook("metric-service-relation-joined")
def metric_service_joined():
# NOTE(jamespage): gnocchiclient is required to support
# the gnocchi event dispatcher
apt_install(filter_installed_packages(['python-gnocchiclient']),
fatal=True)
@hooks.hook("amqp-relation-changed",
"amqp-relation-departed",
"shared-db-relation-changed",
"shared-db-relation-departed",
"identity-service-relation-changed",
"identity-service-relation-departed",
"identity-credentials-relation-changed",
"identity-credentials-relation-departed",
"metric-service-relation-changed",
"metric-service-relation-departed")
@restart_on_change(restart_map())
def any_changed():
CONFIGS.write_all()
configure_https()
for rid in relation_ids('identity-service'):
keystone_joined(relid=rid)
ceilometer_joined()
# NOTE(jamespage): ceilometer@ocata requires both gnocchi
# and mongodb to be configured to successfully
# upgrade the underlying data stores.
if ('metric-service' in CONFIGS.complete_contexts() and
'identity-service' in CONFIGS.complete_contexts()):
cmp_codename = CompareOpenStackReleases(
get_os_codename_install_source(config('openstack-origin')))
# NOTE(jamespage): however at queens, this limitation has gone!
if (cmp_codename < 'queens' and
'mongodb' not in CONFIGS.complete_contexts()):
return
ceilometer_upgrade()
def configure_https():
"""Enables SSL API Apache config if appropriate."""
# need to write all to ensure changes to the entire request pipeline
# propagate (c-api, haprxy, apache)
cmp_codename = CompareOpenStackReleases(
get_os_codename_install_source(config('openstack-origin')))
if cmp_codename >= 'queens':
return
CONFIGS.write_all()
if 'https' in CONFIGS.complete_contexts():
cmd = ['a2ensite', 'openstack_https_frontend']
subprocess.check_call(cmd)
else:
cmd = ['a2dissite', 'openstack_https_frontend']
subprocess.check_call(cmd)
# TODO: improve this by checking if local CN certs are available
# first then checking reload status (see LP #1433114).
if not is_unit_paused_set():
try:
subprocess.check_call(['service', 'apache2', 'reload'])
except subprocess.CalledProcessError:
subprocess.call(['service', 'apache2', 'restart'])
@hooks.hook('config-changed')
@restart_on_change(restart_map())
@harden()
def config_changed():
if not config('action-managed-upgrade'):
if openstack_upgrade_available('ceilometer-common'):
status_set('maintenance', 'Upgrading to new OpenStack release')
do_openstack_upgrade(CONFIGS)
install_event_pipeline_setting()
update_nrpe_config()
CONFIGS.write_all()
# NOTE(jamespage): Drop when charm switches to apache2+mod_wsgi
# reload ensures port override is set correctly
reload_systemd()
ceilometer_joined()
cmp_codename = CompareOpenStackReleases(
get_os_codename_install_source(config('openstack-origin')))
if cmp_codename < 'queens':
open_port(CEILOMETER_PORT)
else:
close_port(CEILOMETER_PORT)
configure_https()
# NOTE(jamespage): Iterate identity-{service,credentials} relations
# to pickup any required databag changes on these
# relations.
for rid in relation_ids('identity-service'):
keystone_joined(relid=rid)
for rid in relation_ids('identity-credentials'):
keystone_credentials_joined(relid=rid)
# Define the new ocf resource and use the key delete_resources to delete
# legacy resource for >= Liberty since the ceilometer-agent-central moved
# to ceilometer-polling in liberty (see LP: #1606787).
for rid in relation_ids('ha'):
ha_joined(rid)
def install_event_pipeline_setting():
src_file = 'files/event_pipeline_alarm.yaml'
dest_file = '/etc/ceilometer/event_pipeline_alarm.yaml'
if not os.path.isdir(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copy(src_file, dest_file)
@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
install()
update_nrpe_config()
any_changed()
for rid in relation_ids('cluster'):
cluster_joined(relation_id=rid)
@hooks.hook('cluster-relation-joined')
@restart_on_change(restart_map(), stopstart=True)
def cluster_joined(relation_id=None):
# If this node is the elected leader then share our secret with other nodes
if is_elected_leader('grp_ceilometer_vips'):
peer_store('shared_secret', get_shared_secret())
CONFIGS.write_all()
settings = {}
for addr_type in ADDRESS_TYPES:
address = get_relation_ip(
addr_type,
cidr_network=config('os-{}-network'.format(addr_type)))
if address:
settings['{}-address'.format(addr_type)] = address
settings['private-address'] = get_relation_ip('cluster')
relation_set(relation_id=relation_id, relation_settings=settings)
@hooks.hook('cluster-relation-changed',
'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
shared_secret = peer_retrieve('shared_secret')
if shared_secret is None or shared_secret.strip() == '':
log('waiting for shared secret to be provided by leader')
elif not shared_secret == get_shared_secret():
set_shared_secret(shared_secret)
CONFIGS.write_all()
@hooks.hook('ha-relation-joined')
def ha_joined(relation_id=None):
cluster_config = get_hacluster_config()
delete_resources = []
delete_resources.append('res_ceilometer_polling')
resources = {
'res_ceilometer_haproxy': 'lsb:haproxy',
'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central',
}
resource_params = {
'res_ceilometer_haproxy': 'op monitor interval="5s"',
'res_ceilometer_agent_central': 'op monitor interval="30s"'
}
if config('dns-ha'):
update_dns_ha_resource_params(relation_id=relation_id,
resources=resources,
resource_params=resource_params)
else:
vip_group = []
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_ceilometer_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = get_iface_for_address(vip)
if iface is not None:
vip_key = 'res_ceilometer_{}_vip'.format(iface)
if vip_key in vip_group:
if vip not in resource_params[vip_key]:
vip_key = '{}_{}'.format(vip_key, vip_params)
else:
log("Resource '%s' (vip='%s') already exists in "
"vip group - skipping" % (vip_key, vip), WARNING)
continue
resources[vip_key] = res_ceilometer_vip
resource_params[vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'
''.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=get_netmask_for_address(vip))
)
vip_group.append(vip_key)
if len(vip_group) >= 1:
relation_set(relation_id=relation_id,
groups={'grp_ceilometer_vips':
' '.join(vip_group)})
init_services = {
'res_ceilometer_haproxy': 'haproxy'
}
clones = {
'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'
}
relation_set(relation_id=relation_id,
init_services=init_services,
corosync_bindiface=cluster_config['ha-bindiface'],
corosync_mcastport=cluster_config['ha-mcastport'],
resources=resources,
resource_params=resource_params,
delete_resources=delete_resources,
clones=clones)
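# Illustrative example (values are hypothetical, not taken from a real
# deployment): for a single IPv4 VIP of 10.0.0.100 on eth0, ha_joined() above
# would add roughly the following to the data passed to the hacluster charm:
#   resources['res_ceilometer_eth0_vip'] = 'ocf:heartbeat:IPaddr2'
#   resource_params['res_ceilometer_eth0_vip'] = (
#       'params ip="10.0.0.100" cidr_netmask="255.255.255.0" nic="eth0"')
#   groups = {'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'}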
@hooks.hook('ha-relation-changed')
def ha_changed():
clustered = relation_get('clustered')
if not clustered or clustered in [None, 'None', '']:
log('ha_changed: hacluster subordinate not fully clustered.')
else:
log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
for rid in relation_ids('identity-service'):
keystone_joined(relid=rid)
@hooks.hook("identity-credentials-relation-joined")
def keystone_credentials_joined(relid=None):
relation_set(relation_id=relid,
username=CEILOMETER_SERVICE,
requested_roles=CEILOMETER_ROLE)
@hooks.hook("identity-service-relation-joined")
def keystone_joined(relid=None):
cmp_codename = CompareOpenStackReleases(
get_os_codename_install_source(config('openstack-origin')))
if cmp_codename >= 'queens':
log('Skipping endpoint registration for >= Queens', level=DEBUG)
return
if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
return
public_url = "{}:{}".format(
canonical_url(CONFIGS, PUBLIC),
CEILOMETER_PORT
)
admin_url = "{}:{}".format(
canonical_url(CONFIGS, ADMIN),
CEILOMETER_PORT
)
internal_url = "{}:{}".format(
canonical_url(CONFIGS, INTERNAL),
CEILOMETER_PORT
)
region = config("region")
relation_set(relation_id=relid,
service=CEILOMETER_SERVICE,
public_url=public_url,
admin_url=admin_url,
internal_url=internal_url,
requested_roles=CEILOMETER_ROLE,
region=region)
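# For illustration only (hypothetical address; the port comes from
# CEILOMETER_PORT in ceilometer_contexts): with a public endpoint address of
# http://10.5.0.10 the relation data set above would look roughly like
#   public_url='http://10.5.0.10:<CEILOMETER_PORT>', admin_url=...,
#   internal_url=..., service=CEILOMETER_SERVICE, requested_roles=CEILOMETER_ROLE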
@hooks.hook('identity-notifications-relation-changed')
def identity_notifications_changed():
"""Receive notifications from keystone."""
notifications = relation_get()
if not notifications:
return
# Some ceilometer services will create a client and request
# the service catalog from keystone on startup. So if
# endpoints change we need to restart these services.
key = '%s-endpoint-changed' % (CEILOMETER_SERVICE)
if key in notifications:
service_restart('ceilometer-alarm-evaluator')
service_restart('ceilometer-alarm-notifier')
@hooks.hook("ceilometer-service-relation-joined")
def ceilometer_joined():
# Pass local context data onto related agent services
context = get_ceilometer_context()
    # The context transforms this value into a file path; we need to pass the
    # actual certificate data on to the agents.
if 'rabbit_ssl_ca' in context:
with open(context['rabbit_ssl_ca']) as fh:
context['rabbit_ssl_ca'] = base64.b64encode(fh.read())
for relid in relation_ids('ceilometer-service'):
relation_set(relid, context)
@hooks.hook('nrpe-external-master-relation-joined',
'nrpe-external-master-relation-changed')
def update_nrpe_config():
# python-dbus is used by check_upstart_job
apt_install('python-dbus')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.copy_nrpe_checks()
nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
nrpe.add_haproxy_checks(nrpe_setup, current_unit)
nrpe_setup.write()
@hooks.hook('update-status')
@harden()
def update_status():
log('Updating status.')
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
assess_status(CONFIGS)
|
py | 1a365153b01ada4fd2a611984db03b5bba9a04b0 | from ast import literal_eval
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.conf import settings
from rest_framework import authentication, permissions,\
viewsets, filters, response, status
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from bokeh.embed import autoload_server
from .forms import JobFilter
from .models import Job, Metric, Measurement, VersionedPackage
from .serializers import JobSerializer, MetricSerializer,\
RegressionSerializer
try:
bokeh_url = settings.BOKEH_URL
except AttributeError:
# if not specified use the default which is localhost:5006
bokeh_url = 'default'
class DefaultsMixin(object):
"""
Default settings for view authentication, permissions,
filtering and pagination.
"""
authentication_classes = (
authentication.BasicAuthentication,
authentication.TokenAuthentication,
)
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
)
paginate_by = 100
# list of available filter_backends, will enable these for all ViewSets
filter_backends = (
filters.DjangoFilterBackend,
filters.SearchFilter,
filters.OrderingFilter,
)
class JobViewSet(DefaultsMixin, CacheResponseMixin, viewsets.ModelViewSet):
"""API endpoint for listing and creating jobs"""
queryset = Job.objects.\
prefetch_related('packages', 'measurements').order_by('date')
serializer_class = JobSerializer
filter_class = JobFilter
search_fields = ('ci_id',)
ordering_fields = ('date',)
class MeasurementViewSet(DefaultsMixin, CacheResponseMixin,
viewsets.ModelViewSet):
"""API endpoint consumed by the monitor app"""
queryset = Measurement.objects.\
prefetch_related('job', 'metric').order_by('job__date')
serializer_class = RegressionSerializer
filter_fields = ('job__ci_dataset', 'metric')
class MetricViewSet(DefaultsMixin, CacheResponseMixin, viewsets.ModelViewSet):
"""API endpoint for listing and creating metrics"""
queryset = Metric.objects.order_by('metric')
serializer_class = MetricSerializer
def create(self, request, *args, **kwargs):
# many=True for adding multiple items at once
serializer = self.get_serializer(data=request.data,
many=isinstance(request.data, list))
serializer.is_valid(raise_exception=True)
serializer.save()
return response.Response(serializer.data,
status=status.HTTP_201_CREATED)
search_fields = ('metric', )
ordering_fields = ('metric',)
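# Usage sketch (hypothetical payload and client; field names depend on the
# Metric model): because create() above passes many=True when the request body
# is a list, several metrics can be registered in a single POST, e.g.
#   client.post('/metrics/', [{'metric': 'AM1'}, {'metric': 'PA1'}],
#               format='json')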
class DatasetViewSet(DefaultsMixin, viewsets.ViewSet):
"""API endpoint for listing datasets"""
def list(self, request):
datasets = Job.objects.values_list('ci_dataset', flat=True).distinct()
return response.Response(datasets)
class DefaultsViewSet(DefaultsMixin, viewsets.ViewSet):
"""
API endpoint for listing default values used by
the bokeh apps
"""
def get_defaults(self):
queryset = Job.objects.values('ci_id', 'ci_dataset').latest('pk')
ci_id = queryset['ci_id']
ci_dataset = queryset['ci_dataset']
queryset = Metric.objects.values_list('metric', flat=True)
if 'AM1' in queryset:
metric = 'AM1'
else:
metric = queryset.latest('pk')
snr_cut = '100'
window = 'months'
return {'ci_id': ci_id, 'ci_dataset': ci_dataset,
'metric': metric, 'snr_cut': snr_cut,
'window': window}
def list(self, request):
defaults = self.get_defaults()
return response.Response(defaults)
class BokehAppViewSet(DefaultsMixin, viewsets.ViewSet):
def get_app_data(self, ci_id, ci_dataset, metric):
data = {}
blobs = Job.objects.filter(ci_id=ci_id,
ci_dataset=ci_dataset).values('blobs')
metadata = Measurement.\
objects.filter(metric=metric, job__ci_id=ci_id,
job__ci_dataset=ci_dataset).values('metadata')
if metadata.exists():
# workaround for getting item from queryset
metadata = metadata[0]['metadata']
if metadata:
metadata = literal_eval(literal_eval(metadata))
blob_id = metadata.pop('blobs')
data['metadata'] = metadata
if blobs.exists():
# workaround for getting item from queryset
blobs = blobs[0]['blobs']
if blobs:
blobs = literal_eval(literal_eval(blobs))
for blob in blobs:
# Look up for data blobs
if blob['identifier'] == blob_id['matchedDataset']:
data['matchedDataset'] = blob['data']
elif blob['identifier'] == blob_id['photomModel']:
data['photomModel'] = blob['data']
elif blob['identifier'] == blob_id['astromModel']:
data['astromModel'] = blob['data']
return data
def list(self, request):
defaults = DefaultsViewSet().get_defaults()
ci_id = self.request.query_params.get('ci_id',
defaults['ci_id'])
ci_dataset = self.request.query_params.get('ci_dataset',
defaults['ci_dataset'])
metric = self.request.query_params.get('metric',
defaults['metric'])
data = self.get_app_data(ci_id, ci_dataset, metric)
return response.Response(data)
def embed_bokeh(request, bokeh_app):
"""Render the requested app from the bokeh server"""
# http://bokeh.pydata.org/en/0.12.3/docs/reference/embed.html
# TODO: test if bokeh server is reachable
bokeh_script = autoload_server(None, app_path="/{}".format(bokeh_app),
url=bokeh_url)
template = loader.get_template('dashboard/embed_bokeh.html')
context = {'bokeh_script': bokeh_script,
'bokeh_app': bokeh_app}
response = HttpResponse(template.render(context, request))
# Save full url path in the HTTP response, so that the bokeh
# app can use this info, e.g:
# http://localhost:8000/dashboard/AMx/?metric=AM1&ci_dataset=cfht&ci_id=452
response.set_cookie('django_full_path', request.get_full_path())
return response
def home(request):
"""Render the home page"""
n_metrics = len(Metric.objects.all())
job = Job.objects.latest('pk')
n_packages = len(VersionedPackage.objects.filter(job=job))
n_jobs = len(Job.objects.all())
n_meas = len(Measurement.objects.all())
datasets = Job.objects.values_list('ci_dataset', flat=True).distinct()
last = Job.objects.latest('pk').date
context = {"n_metrics": n_metrics,
"n_packages": n_packages,
"n_jobs": n_jobs,
"n_meas": n_meas,
"datasets": ", ".join(datasets),
"last": last}
return render(request, 'dashboard/index.html', context)
|
py | 1a36517c694fc9a927b3bf22eb8cb0d097f6c87e | from django.views.generic import TemplateView
from tulius import models
class CountersIndex(TemplateView):
template_name = 'counters/index.haml'
class CountersBase(TemplateView):
template_name = 'counters/success.haml'
def do_action(self):
raise NotImplementedError()
def get_context_data(self, **kwargs):
self.do_action()
return super().get_context_data(**kwargs)
class PMCounters(CountersBase):
def do_action(self):
for player in models.User.objects.all():
player.update_not_readed()
|
py | 1a36517ee3cd2eaf91c09fbfa6fa0add2d7eba52 | # nuScenes dev-kit.
# Code written by Oscar Beijbom, 2018.
import copy
import os.path as osp
import struct
from abc import ABC, abstractmethod
from functools import reduce
from typing import Tuple, List, Dict
import cv2
import numpy as np
from matplotlib.axes import Axes
from pyquaternion import Quaternion
from nuscenes.utils.geometry_utils import view_points, transform_matrix
class PointCloud(ABC):
"""
Abstract class for manipulating and viewing point clouds.
Every point cloud (lidar and radar) consists of points where:
- Dimensions 0, 1, 2 represent x, y, z coordinates.
These are modified when the point cloud is rotated or translated.
- All other dimensions are optional. Hence these have to be manually modified if the reference frame changes.
"""
def __init__(self, points: np.ndarray):
"""
Initialize a point cloud and check it has the correct dimensions.
:param points: <np.float: d, n>. d-dimensional input point cloud matrix.
"""
assert points.shape[0] == self.nbr_dims(), 'Error: Pointcloud points must have format: %d x n' % self.nbr_dims()
self.points = points
@staticmethod
@abstractmethod
def nbr_dims() -> int:
"""
Returns the number of dimensions.
:return: Number of dimensions.
"""
pass
@classmethod
@abstractmethod
def from_file(cls, file_name: str) -> 'PointCloud':
"""
Loads point cloud from disk.
:param file_name: Path of the pointcloud file on disk.
:return: PointCloud instance.
"""
pass
@classmethod
def from_file_multisweep(cls,
nusc: 'NuScenes',
sample_rec: Dict,
chan: str,
ref_chan: str,
nsweeps: int = 5,
min_distance: float = 1.0) -> Tuple['PointCloud', np.ndarray]:
"""
Return a point cloud that aggregates multiple sweeps.
As every sweep is in a different coordinate frame, we need to map the coordinates to a single reference frame.
As every sweep has a different timestamp, we need to account for that in the transformations and timestamps.
:param nusc: A NuScenes instance.
:param sample_rec: The current sample.
:param chan: The lidar/radar channel from which we track back n sweeps to aggregate the point cloud.
:param ref_chan: The reference channel of the current sample_rec that the point clouds are mapped to.
        :param nsweeps: Number of sweeps to aggregate.
:param min_distance: Distance below which points are discarded.
:return: (all_pc, all_times). The aggregated point cloud and timestamps.
"""
# Init.
points = np.zeros((cls.nbr_dims(), 0))
all_pc = cls(points)
all_times = np.zeros((1, 0))
# Get reference pose and timestamp.
ref_sd_token = sample_rec['data'][ref_chan]
ref_sd_rec = nusc.get('sample_data', ref_sd_token)
ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
ref_time = 1e-6 * ref_sd_rec['timestamp']
# Homogeneous transform from ego car frame to reference frame.
ref_from_car = transform_matrix(ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), inverse=True)
        # Homogeneous transformation matrix from the global frame to the reference ego car frame.
car_from_global = transform_matrix(ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']),
inverse=True)
# Aggregate current and previous sweeps.
sample_data_token = sample_rec['data'][chan]
current_sd_rec = nusc.get('sample_data', sample_data_token)
for _ in range(nsweeps):
# Load up the pointcloud and remove points close to the sensor.
current_pc = cls.from_file(osp.join(nusc.dataroot, current_sd_rec['filename']))
current_pc.remove_close(min_distance)
# Get past pose.
current_pose_rec = nusc.get('ego_pose', current_sd_rec['ego_pose_token'])
global_from_car = transform_matrix(current_pose_rec['translation'],
Quaternion(current_pose_rec['rotation']), inverse=False)
# Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
current_cs_rec = nusc.get('calibrated_sensor', current_sd_rec['calibrated_sensor_token'])
car_from_current = transform_matrix(current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']),
inverse=False)
# Fuse four transformation matrices into one and perform transform.
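            # Reading the product right to left, a point is mapped: current sensor
            # frame -> current ego frame -> global frame -> reference ego frame ->
            # reference sensor frame.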
trans_matrix = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
current_pc.transform(trans_matrix)
# Add time vector which can be used as a temporal feature.
time_lag = ref_time - 1e-6 * current_sd_rec['timestamp'] # Positive difference.
times = time_lag * np.ones((1, current_pc.nbr_points()))
all_times = np.hstack((all_times, times))
# Merge with key pc.
all_pc.points = np.hstack((all_pc.points, current_pc.points))
# Abort if there are no previous sweeps.
if current_sd_rec['prev'] == '':
break
else:
current_sd_rec = nusc.get('sample_data', current_sd_rec['prev'])
return all_pc, all_times
def nbr_points(self) -> int:
"""
Returns the number of points.
:return: Number of points.
"""
return self.points.shape[1]
def subsample(self, ratio: float) -> None:
"""
Sub-samples the pointcloud.
:param ratio: Fraction to keep.
"""
selected_ind = np.random.choice(np.arange(0, self.nbr_points()), size=int(self.nbr_points() * ratio))
self.points = self.points[:, selected_ind]
def remove_close(self, radius: float) -> None:
"""
        Removes points that are too close to the origin (within the given radius).
:param radius: Radius below which points are removed.
"""
x_filt = np.abs(self.points[0, :]) < radius
y_filt = np.abs(self.points[1, :]) < radius
not_close = np.logical_not(np.logical_and(x_filt, y_filt))
self.points = self.points[:, not_close]
def translate(self, x: np.ndarray) -> None:
"""
Applies a translation to the point cloud.
:param x: <np.float: 3, 1>. Translation in x, y, z.
"""
for i in range(3):
self.points[i, :] = self.points[i, :] + x[i]
def rotate(self, rot_matrix: np.ndarray) -> None:
"""
Applies a rotation.
:param rot_matrix: <np.float: 3, 3>. Rotation matrix.
"""
self.points[:3, :] = np.dot(rot_matrix, self.points[:3, :])
def transform(self, transf_matrix: np.ndarray) -> None:
"""
Applies a homogeneous transform.
        :param transf_matrix: <np.float: 4, 4>. Homogeneous transformation matrix.
"""
self.points[:3, :] = transf_matrix.dot(np.vstack((self.points[:3, :], np.ones(self.nbr_points()))))[:3, :]
def render_height(self,
ax: Axes,
view: np.ndarray = np.eye(4),
x_lim: Tuple[float, float] = (-20, 20),
y_lim: Tuple[float, float] = (-20, 20),
marker_size: float = 1) -> None:
"""
Very simple method that applies a transformation and then scatter plots the points colored by height (z-value).
:param ax: Axes on which to render the points.
:param view: <np.float: n, n>. Defines an arbitrary projection (n <= 4).
:param x_lim: (min, max). x range for plotting.
:param y_lim: (min, max). y range for plotting.
:param marker_size: Marker size.
"""
self._render_helper(2, ax, view, x_lim, y_lim, marker_size)
def render_intensity(self,
ax: Axes,
view: np.ndarray = np.eye(4),
x_lim: Tuple[float, float] = (-20, 20),
y_lim: Tuple[float, float] = (-20, 20),
marker_size: float = 1) -> None:
"""
Very simple method that applies a transformation and then scatter plots the points colored by intensity.
:param ax: Axes on which to render the points.
:param view: <np.float: n, n>. Defines an arbitrary projection (n <= 4).
:param x_lim: (min, max).
:param y_lim: (min, max).
:param marker_size: Marker size.
"""
self._render_helper(3, ax, view, x_lim, y_lim, marker_size)
def _render_helper(self,
color_channel: int,
ax: Axes,
view: np.ndarray,
x_lim: Tuple[float, float],
y_lim: Tuple[float, float],
marker_size: float) -> None:
"""
Helper function for rendering.
:param color_channel: Point channel to use as color.
:param ax: Axes on which to render the points.
:param view: <np.float: n, n>. Defines an arbitrary projection (n <= 4).
:param x_lim: (min, max).
:param y_lim: (min, max).
:param marker_size: Marker size.
"""
points = view_points(self.points[:3, :], view, normalize=False)
ax.scatter(points[0, :], points[1, :], c=self.points[color_channel, :], s=marker_size)
ax.set_xlim(x_lim[0], x_lim[1])
ax.set_ylim(y_lim[0], y_lim[1])
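# Minimal usage sketch (assumes a NuScenes instance and dataroot are available;
# the names below are illustrative and not defined in this module):
#   nusc = NuScenes(version='v1.0-mini', dataroot='/data/nuscenes')
#   sample = nusc.sample[0]
#   pc, times = LidarPointCloud.from_file_multisweep(
#       nusc, sample, chan='LIDAR_TOP', ref_chan='LIDAR_TOP', nsweeps=5)
#   pc.render_height(ax)  # ax: a matplotlib Axes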
class LidarPointCloud(PointCloud):
@staticmethod
def nbr_dims() -> int:
"""
Returns the number of dimensions.
:return: Number of dimensions.
"""
return 4
@classmethod
def from_file(cls, file_name: str) -> 'LidarPointCloud':
"""
Loads LIDAR data from binary numpy format. Data is stored as (x, y, z, intensity, ring index).
:param file_name: Path of the pointcloud file on disk.
:return: LidarPointCloud instance (x, y, z, intensity).
"""
assert file_name.endswith('.bin'), 'Unsupported filetype {}'.format(file_name)
scan = np.fromfile(file_name, dtype=np.float32)
points = scan.reshape((-1, 5))[:, :cls.nbr_dims()]
return cls(points.T)
class RadarPointCloud(PointCloud):
# Class-level settings for radar pointclouds, see from_file().
invalid_states = [0] # type: List[int]
dynprop_states = range(7) # type: List[int] # Use [0, 2, 6] for moving objects only.
ambig_states = [3] # type: List[int]
@classmethod
def disable_filters(cls) -> None:
"""
Disable all radar filter settings.
Use this method to plot all radar returns.
Note that this method affects the global settings.
"""
cls.invalid_states = list(range(18))
cls.dynprop_states = list(range(8))
cls.ambig_states = list(range(5))
@classmethod
def default_filters(cls) -> None:
"""
Set the defaults for all radar filter settings.
Note that this method affects the global settings.
"""
cls.invalid_states = [0]
cls.dynprop_states = range(7)
cls.ambig_states = [3]
@staticmethod
def nbr_dims() -> int:
"""
Returns the number of dimensions.
:return: Number of dimensions.
"""
return 18
@classmethod
def from_file(cls,
file_name: str,
invalid_states: List[int] = None,
dynprop_states: List[int] = None,
ambig_states: List[int] = None) -> 'RadarPointCloud':
"""
Loads RADAR data from a Point Cloud Data file. See details below.
:param file_name: The path of the pointcloud file.
:param invalid_states: Radar states to be kept. See details below.
:param dynprop_states: Radar states to be kept. Use [0, 2, 6] for moving objects only. See details below.
:param ambig_states: Radar states to be kept. See details below.
To keep all radar returns, set each state filter to range(18).
:return: <np.float: d, n>. Point cloud matrix with d dimensions and n points.
Example of the header fields:
# .PCD v0.7 - Point Cloud Data file format
VERSION 0.7
FIELDS x y z dyn_prop id rcs vx vy vx_comp vy_comp is_quality_valid ambig_state x_rms y_rms invalid_state pdh0 vx_rms vy_rms
SIZE 4 4 4 1 2 4 4 4 4 4 1 1 1 1 1 1 1 1
TYPE F F F I I F F F F F I I I I I I I I
COUNT 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
WIDTH 125
HEIGHT 1
VIEWPOINT 0 0 0 1 0 0 0
POINTS 125
DATA binary
Below some of the fields are explained in more detail:
x is front, y is left
vx, vy are the velocities in m/s.
vx_comp, vy_comp are the velocities in m/s compensated by the ego motion.
We recommend using the compensated velocities.
invalid_state: state of Cluster validity state.
(Invalid states)
0x01 invalid due to low RCS
0x02 invalid due to near-field artefact
0x03 invalid far range cluster because not confirmed in near range
0x05 reserved
0x06 invalid cluster due to high mirror probability
0x07 Invalid cluster because outside sensor field of view
0x0d reserved
0x0e invalid cluster because it is a harmonics
(Valid states)
0x00 valid
0x04 valid cluster with low RCS
0x08 valid cluster with azimuth correction due to elevation
0x09 valid cluster with high child probability
0x0a valid cluster with high probability of being a 50 deg artefact
0x0b valid cluster but no local maximum
0x0c valid cluster with high artefact probability
0x0f valid cluster with above 95m in near range
0x10 valid cluster with high multi-target probability
0x11 valid cluster with suspicious angle
dynProp: Dynamic property of cluster to indicate if is moving or not.
0: moving
1: stationary
2: oncoming
3: stationary candidate
4: unknown
5: crossing stationary
6: crossing moving
7: stopped
ambig_state: State of Doppler (radial velocity) ambiguity solution.
0: invalid
1: ambiguous
2: staggered ramp
3: unambiguous
4: stationary candidates
pdh0: False alarm probability of cluster (i.e. probability of being an artefact caused by multipath or similar).
0: invalid
1: <25%
2: 50%
3: 75%
4: 90%
5: 99%
6: 99.9%
7: <=100%
"""
assert file_name.endswith('.pcd'), 'Unsupported filetype {}'.format(file_name)
meta = []
with open(file_name, 'rb') as f:
for line in f:
line = line.strip().decode('utf-8')
meta.append(line)
if line.startswith('DATA'):
break
data_binary = f.read()
# Get the header rows and check if they appear as expected.
assert meta[0].startswith('#'), 'First line must be comment'
assert meta[1].startswith('VERSION'), 'Second line must be VERSION'
sizes = meta[3].split(' ')[1:]
types = meta[4].split(' ')[1:]
counts = meta[5].split(' ')[1:]
width = int(meta[6].split(' ')[1])
height = int(meta[7].split(' ')[1])
data = meta[10].split(' ')[1]
feature_count = len(types)
assert width > 0
        assert len([c for c in counts if c != '1']) == 0, 'Error: COUNT not supported!'
        assert height == 1, 'Error: height != 1 not supported!'
assert data == 'binary'
# Lookup table for how to decode the binaries.
unpacking_lut = {'F': {2: 'e', 4: 'f', 8: 'd'},
'I': {1: 'b', 2: 'h', 4: 'i', 8: 'q'},
'U': {1: 'B', 2: 'H', 4: 'I', 8: 'Q'}}
types_str = ''.join([unpacking_lut[t][int(s)] for t, s in zip(types, sizes)])
# Decode each point.
offset = 0
point_count = width
points = []
for i in range(point_count):
point = []
for p in range(feature_count):
start_p = offset
end_p = start_p + int(sizes[p])
assert end_p < len(data_binary)
point_p = struct.unpack(types_str[p], data_binary[start_p:end_p])[0]
point.append(point_p)
offset = end_p
points.append(point)
# A NaN in the first point indicates an empty pointcloud.
point = np.array(points[0])
if np.any(np.isnan(point)):
return cls(np.zeros((feature_count, 0)))
# Convert to numpy matrix.
points = np.array(points).transpose()
# If no parameters are provided, use default settings.
invalid_states = cls.invalid_states if invalid_states is None else invalid_states
dynprop_states = cls.dynprop_states if dynprop_states is None else dynprop_states
ambig_states = cls.ambig_states if ambig_states is None else ambig_states
# Filter points with an invalid state.
valid = [p in invalid_states for p in points[-4, :]]
points = points[:, valid]
# Filter by dynProp.
valid = [p in dynprop_states for p in points[3, :]]
points = points[:, valid]
# Filter by ambig_state.
valid = [p in ambig_states for p in points[11, :]]
points = points[:, valid]
return cls(points)
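# Usage sketch (hypothetical file path): keep only clusters whose dynamic
# property marks them as moving, as documented in from_file() above, or call
# RadarPointCloud.disable_filters() beforehand to keep every return.
#   pc = RadarPointCloud.from_file('sweep.pcd', dynprop_states=[0, 2, 6])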
class Box:
""" Simple data class representing a 3d box including, label, score and velocity. """
def __init__(self,
center: List[float],
size: List[float],
orientation: Quaternion,
label: int = np.nan,
score: float = np.nan,
velocity: Tuple = (np.nan, np.nan, np.nan),
name: str = None,
token: str = None):
"""
:param center: Center of box given as x, y, z.
:param size: Size of box in width, length, height.
:param orientation: Box orientation.
:param label: Integer label, optional.
:param score: Classification score, optional.
:param velocity: Box velocity in x, y, z direction.
:param name: Box name, optional. Can be used e.g. for denote category name.
:param token: Unique string identifier from DB.
"""
assert not np.any(np.isnan(center))
assert not np.any(np.isnan(size))
assert len(center) == 3
assert len(size) == 3
assert type(orientation) == Quaternion
self.center = np.array(center)
self.wlh = np.array(size)
self.orientation = orientation
self.label = int(label) if not np.isnan(label) else label
self.score = float(score) if not np.isnan(score) else score
self.velocity = np.array(velocity)
self.name = name
self.token = token
def __eq__(self, other):
center = np.allclose(self.center, other.center)
wlh = np.allclose(self.wlh, other.wlh)
orientation = np.allclose(self.orientation.elements, other.orientation.elements)
label = (self.label == other.label) or (np.isnan(self.label) and np.isnan(other.label))
score = (self.score == other.score) or (np.isnan(self.score) and np.isnan(other.score))
vel = (np.allclose(self.velocity, other.velocity) or
(np.all(np.isnan(self.velocity)) and np.all(np.isnan(other.velocity))))
return center and wlh and orientation and label and score and vel
def __repr__(self):
repr_str = 'label: {}, score: {:.2f}, xyz: [{:.2f}, {:.2f}, {:.2f}], wlh: [{:.2f}, {:.2f}, {:.2f}], ' \
'rot axis: [{:.2f}, {:.2f}, {:.2f}], ang(degrees): {:.2f}, ang(rad): {:.2f}, ' \
'vel: {:.2f}, {:.2f}, {:.2f}, name: {}, token: {}'
return repr_str.format(self.label, self.score, self.center[0], self.center[1], self.center[2], self.wlh[0],
self.wlh[1], self.wlh[2], self.orientation.axis[0], self.orientation.axis[1],
self.orientation.axis[2], self.orientation.degrees, self.orientation.radians,
self.velocity[0], self.velocity[1], self.velocity[2], self.name, self.token)
@property
def rotation_matrix(self) -> np.ndarray:
"""
Return a rotation matrix.
:return: <np.float: 3, 3>. The box's rotation matrix.
"""
return self.orientation.rotation_matrix
def translate(self, x: np.ndarray) -> None:
"""
Applies a translation.
:param x: <np.float: 3, 1>. Translation in x, y, z direction.
"""
self.center += x
def rotate(self, quaternion: Quaternion) -> None:
"""
Rotates box.
:param quaternion: Rotation to apply.
"""
self.center = np.dot(quaternion.rotation_matrix, self.center)
self.orientation = quaternion * self.orientation
self.velocity = np.dot(quaternion.rotation_matrix, self.velocity)
def corners(self, wlh_factor: float = 1.0):
"""
Returns the bounding box corners.
:param wlh_factor: Multiply w, l, h by a factor to scale the box.
:return: <np.float: 3, 8>. First four corners are the ones facing forward.
The last four are the ones facing backwards.
"""
w, l, h = self.wlh * wlh_factor
# 3D bounding box corners. (Convention: x points forward, y to the left, z up.)
x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
corners = np.vstack((x_corners, y_corners, z_corners))
# Rotate
corners = np.dot(self.orientation.rotation_matrix, corners)
# Translate
x, y, z = self.center
corners[0, :] = corners[0, :] + x
corners[1, :] = corners[1, :] + y
corners[2, :] = corners[2, :] + z
return corners
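    # Worked example of the convention above: for an axis-aligned box with
    # wlh = (2, 4, 1.5) centred at the origin (identity orientation), corners()
    # returns x in {+2, -2}, y in {+1, -1} and z in {+0.75, -0.75}, with the
    # first four corners lying on the forward-facing (+x) side.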
    def extremePoints(self,
                      view: np.ndarray = np.eye(3),
                      normalize: bool = False):
        """
        Returns the corners of the axis-aligned 2d box enclosing the two leftmost
        and two rightmost projected corners of this 3d box.
        :param view: <np.float: 3, 3>. Projection matrix (e.g. camera intrinsics).
        :param normalize: Whether to normalize the remaining coordinate.
        :return: <np.float: 4, 2>. Corners ordered [left-bottom, right-bottom, right-top, left-top].
        """
        corners = view_points(self.corners(), view, normalize=normalize)[:2, :]
        corners = corners.T
        # Sort the projected corners by x and keep the two leftmost and two
        # rightmost points.
        corners = corners[corners[:, 0].argsort()]
        corners = np.array([corners[0], corners[1], corners[-1], corners[-2]])
        corners = corners.T
        l = np.min(corners[0])  # left limit
        r = np.max(corners[0])  # right limit
        t = np.max(corners[1])  # top limit
        b = np.min(corners[1])  # bottom limit
        return np.array([[l, b], [r, b], [r, t], [l, t]])
    def get2Dbox(self,
                 axis: Axes,
                 view: np.ndarray = np.eye(3),
                 normalize: bool = False,
                 colors: Tuple = ('b', 'r', 'k'),
                 linewidth: float = 2):
        """
        Draws the axis-aligned 2d box returned by extremePoints on the given axis.
        :param axis: Axis onto which the box should be drawn.
        :param view: <np.float: 3, 3>. Projection matrix.
        :param normalize: Whether to normalize the remaining coordinate.
        :param colors: Valid Matplotlib colors; only the first entry is used.
        :param linewidth: Width in pixel of the box sides.
        """
        corners = self.extremePoints(view=view, normalize=normalize)
        def draw_rect(selected_corners, color):
            prev = selected_corners[-1]
            for corner in selected_corners:
                axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color, linewidth=linewidth)
                prev = corner
        draw_rect(corners, colors[0])
def bottom_corners(self) -> np.ndarray:
"""
Returns the four bottom corners.
:return: <np.float: 3, 4>. Bottom corners. First two face forward, last two face backwards.
"""
return self.corners()[:, [2, 3, 7, 6]]
def render(self,
axis: Axes,
view: np.ndarray = np.eye(3),
normalize: bool = False,
colors: Tuple = ('b', 'r', 'k'),
linewidth: float = 2) -> None:
"""
Renders the box in the provided Matplotlib axis.
:param axis: Axis onto which the box should be drawn.
:param view: <np.array: 3, 3>. Define a projection in needed (e.g. for drawing projection in an image).
:param normalize: Whether to normalize the remaining coordinate.
:param colors: (<Matplotlib.colors>: 3). Valid Matplotlib colors (<str> or normalized RGB tuple) for front,
back and sides.
:param linewidth: Width in pixel of the box sides.
"""
corners = view_points(self.corners(), view, normalize=normalize)[:2, :]
def draw_rect(selected_corners, color):
prev = selected_corners[-1]
for corner in selected_corners:
axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color, linewidth=linewidth)
prev = corner
# Draw the sides
for i in range(4):
axis.plot([corners.T[i][0], corners.T[i + 4][0]],
[corners.T[i][1], corners.T[i + 4][1]],
color=colors[2], linewidth=linewidth)
# Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)
# print(corners.shape)
draw_rect(corners.T[:4], colors[0])
draw_rect(corners.T[4:], colors[0])
# Draw line indicating the front
center_bottom_forward = np.mean(corners.T[2:4], axis=0)
center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)
axis.plot([center_bottom[0], center_bottom_forward[0]],
[center_bottom[1], center_bottom_forward[1]],
color=colors[0], linewidth=linewidth)
def render_cv2(self,
im: np.ndarray,
view: np.ndarray = np.eye(3),
normalize: bool = False,
colors: Tuple = ((0, 0, 255), (255, 0, 0), (155, 155, 155)),
linewidth: int = 2) -> None:
"""
Renders box using OpenCV2.
:param im: <np.array: width, height, 3>. Image array. Channels are in BGR order.
:param view: <np.array: 3, 3>. Define a projection if needed (e.g. for drawing projection in an image).
:param normalize: Whether to normalize the remaining coordinate.
:param colors: ((R, G, B), (R, G, B), (R, G, B)). Colors for front, side & rear.
:param linewidth: Linewidth for plot.
"""
corners = view_points(self.corners(), view, normalize=normalize)[:2, :]
def draw_rect(selected_corners, color):
prev = selected_corners[-1]
for corner in selected_corners:
cv2.line(im,
(int(prev[0]), int(prev[1])),
(int(corner[0]), int(corner[1])),
color, linewidth)
prev = corner
# Draw the sides
for i in range(4):
cv2.line(im,
(int(corners.T[i][0]), int(corners.T[i][1])),
(int(corners.T[i + 4][0]), int(corners.T[i + 4][1])),
colors[2][::-1], linewidth)
# Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)
draw_rect(corners.T[:4], colors[0][::-1])
draw_rect(corners.T[4:], colors[1][::-1])
# Draw line indicating the front
center_bottom_forward = np.mean(corners.T[2:4], axis=0)
center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)
cv2.line(im,
(int(center_bottom[0]), int(center_bottom[1])),
(int(center_bottom_forward[0]), int(center_bottom_forward[1])),
colors[0][::-1], linewidth)
def copy(self) -> 'Box':
"""
Create a copy of self.
:return: A copy.
"""
return copy.deepcopy(self)
|
py | 1a36522bf2a1c872036bbca977c3c42a27593ea3 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The CenterNet meta architecture as described in the "Objects as Points" paper [1].
[1]: https://arxiv.org/abs/1904.07850
"""
import abc
import collections
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as cn_assigner
from object_detection.utils import shape_utils
# Number of channels needed to predict size and offsets.
NUM_OFFSET_CHANNELS = 2
NUM_SIZE_CHANNELS = 2
# Error range for detecting peaks.
PEAK_EPSILON = 1e-6
# Constants shared between all keypoint tasks.
UNMATCHED_KEYPOINT_SCORE = 0.1
KEYPOINT_CANDIDATE_SEARCH_SCALE = 0.3
class CenterNetFeatureExtractor(tf.keras.Model):
"""Base class for feature extractors for the CenterNet meta architecture.
Child classes are expected to override the _output_model property which will
return 1 or more tensors predicted by the feature extractor.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes a CenterNet feature extractor.
Args:
name: str, the name used for the underlying keras model.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it. If None or empty, we use 0s.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
If None or empty, we use 1s.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetFeatureExtractor, self).__init__(name=name)
if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test
channel_means = [0., 0., 0.]
if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test
channel_stds = [1., 1., 1.]
self._channel_means = channel_means
self._channel_stds = channel_stds
self._bgr_ordering = bgr_ordering
def preprocess(self, inputs):
"""Converts a batch of unscaled images to a scale suitable for the model.
This method normalizes the image using the given `channel_means` and
`channels_stds` values at initialization time while optionally flipping
the channel order if `bgr_ordering` is set.
Args:
inputs: a [batch, height, width, channels] float32 tensor
Returns:
outputs: a [batch, height, width, channels] float32 tensor
"""
if self._bgr_ordering:
red, green, blue = tf.unstack(inputs, axis=3)
inputs = tf.stack([blue, green, red], axis=3)
channel_means = tf.reshape(tf.constant(self._channel_means),
[1, 1, 1, -1])
channel_stds = tf.reshape(tf.constant(self._channel_stds),
[1, 1, 1, -1])
return (inputs - channel_means)/channel_stds
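  # For example (illustrative values, not defaults): with
  # channel_means=(0.5, 0.5, 0.5) and channel_stds=(0.5, 0.5, 0.5), an input
  # already scaled to [0, 1] is mapped to [-1, 1] by preprocess() above.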
@property
@abc.abstractmethod
def out_stride(self):
"""The stride in the output image of the network."""
pass
@property
@abc.abstractmethod
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
pass
@property
@abc.abstractmethod
def supported_sub_model_types(self):
"""Valid sub model types supported by the get_sub_model function."""
pass
@abc.abstractmethod
def get_sub_model(self, sub_model_type):
"""Returns the underlying keras model for the given sub_model_type.
This function is useful when we only want to get a subset of weights to
be restored from a checkpoint.
Args:
sub_model_type: string, the type of sub model. Currently, CenterNet
feature extractors support 'detection' and 'classification'.
"""
pass
def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256,
bias_fill=None, use_depthwise=False, name=None):
"""Creates a network to predict the given number of output channels.
This function is intended to make the prediction heads for the CenterNet
meta architecture.
Args:
num_out_channels: Number of output channels.
kernel_size: The size of the conv kernel in the intermediate layer
num_filters: The number of filters in the intermediate conv layer.
bias_fill: If not None, is used to initialize the bias in the final conv
layer.
use_depthwise: If true, use SeparableConv2D to construct the Sequential
layers instead of Conv2D.
name: Optional name for the prediction net.
Returns:
net: A keras module which when called on an input tensor of size
[batch_size, height, width, num_in_channels] returns an output
of size [batch_size, height, width, num_out_channels]
"""
if use_depthwise:
conv_fn = tf.keras.layers.SeparableConv2D
else:
conv_fn = tf.keras.layers.Conv2D
out_conv = tf.keras.layers.Conv2D(num_out_channels, kernel_size=1)
if bias_fill is not None:
out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)
net = tf.keras.Sequential(
[conv_fn(num_filters, kernel_size=kernel_size, padding='same'),
tf.keras.layers.ReLU(),
out_conv],
name=name)
return net
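# Minimal sketch of how a prediction head could be built and applied
# (illustrative values, not from a released config; _example_center_head is a
# hypothetical helper and not part of the original module):
def _example_center_head(num_classes=80):
  head = make_prediction_net(num_classes, bias_fill=-2.19)
  features = tf.zeros([1, 128, 128, 64])  # dummy backbone feature map
  return head(features)  # -> [1, 128, 128, num_classes] class-center logits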
def _to_float32(x):
return tf.cast(x, tf.float32)
def _get_shape(tensor, num_dims):
tf.Assert(tensor.get_shape().ndims == num_dims, [tensor])
return shape_utils.combined_static_and_dynamic_shape(tensor)
def _flatten_spatial_dimensions(batch_images):
batch_size, height, width, channels = _get_shape(batch_images, 4)
return tf.reshape(batch_images, [batch_size, height * width,
channels])
def _multi_range(limit,
value_repetitions=1,
range_repetitions=1,
dtype=tf.int32):
"""Creates a sequence with optional value duplication and range repetition.
As an example (see the Args section for more details),
_multi_range(limit=2, value_repetitions=3, range_repetitions=4) returns:
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]
Args:
limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive.
value_repetitions: Integer. The number of times a value in the sequence is
repeated. With value_repetitions=3, the result is [0, 0, 0, 1, 1, 1, ..].
range_repetitions: Integer. The number of times the range is repeated. With
range_repetitions=3, the result is [0, 1, 2, .., 0, 1, 2, ..].
dtype: The type of the elements of the resulting tensor.
Returns:
A 1-D tensor of type `dtype` and size
[`limit` * `value_repetitions` * `range_repetitions`] that contains the
specified range with given repetitions.
"""
return tf.reshape(
tf.tile(
tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1),
multiples=[range_repetitions, value_repetitions]), [-1])
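# For intuition, a NumPy equivalent of the function above (not used by the
# graph code in this file) would be:
#   np.tile(np.repeat(np.arange(limit), value_repetitions), range_repetitions)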
def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100,
per_channel=False):
"""Returns the top k scores and their locations in a feature map.
Given a feature map, the top k values (based on activation) are returned. If
`per_channel` is True, the top k values **per channel** are returned.
The `max_pool_kernel_size` argument allows for selecting local peaks in a
region. This filtering is done per channel, so nothing prevents two values at
  the same location from being returned.
Args:
feature_map: [batch, height, width, channels] float32 feature map.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood (independently for each channel).
For example, to make sure no two neighboring values (in the same channel)
are returned, set max_pool_kernel_size=3. If None or 1, will not apply max
pooling.
k: The number of highest scoring locations to return.
per_channel: If True, will return the top k scores and locations per
feature map channel. If False, the top k across the entire feature map
(height x width x channels) are returned.
Returns:
Tuple of
scores: A [batch, N] float32 tensor with scores from the feature map in
descending order. If per_channel is False, N = k. Otherwise,
N = k * channels, and the first k elements correspond to channel 0, the
second k correspond to channel 1, etc.
y_indices: A [batch, N] int tensor with y indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
x_indices: A [batch, N] int tensor with x indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
channel_indices: A [batch, N] int tensor with channel indices of the top k
feature map locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
"""
if not max_pool_kernel_size or max_pool_kernel_size == 1:
feature_map_peaks = feature_map
else:
feature_map_max_pool = tf.nn.max_pool(
feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')
feature_map_peak_mask = tf.math.abs(
feature_map - feature_map_max_pool) < PEAK_EPSILON
# Zero out everything that is not a peak.
feature_map_peaks = (
feature_map * _to_float32(feature_map_peak_mask))
batch_size, _, width, num_channels = _get_shape(feature_map, 4)
if per_channel:
# Perform top k over batch and channels.
feature_map_peaks_transposed = tf.transpose(feature_map_peaks,
perm=[0, 3, 1, 2])
feature_map_peaks_transposed = tf.reshape(
feature_map_peaks_transposed, [batch_size, num_channels, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_transposed, k=k)
# Convert the indices such that they represent the location in the full
# (flattened) feature map of size [batch, height * width * channels].
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]
peak_flat_indices = num_channels * peak_flat_indices + channel_idx
scores = tf.reshape(scores, [batch_size, -1])
peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1])
else:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k)
# Get x, y and channel indices corresponding to the top indices in the flat
# array.
y_indices, x_indices, channel_indices = (
row_col_channel_indices_from_flattened_indices(
peak_flat_indices, width, num_channels))
return scores, y_indices, x_indices, channel_indices
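# Shape example: a [2, 128, 128, 90] class-center heatmap with k=100 and
# per_channel=False yields scores/indices of shape [2, 100]; with
# per_channel=True each returned tensor has shape [2, 100 * 90] = [2, 9000].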
def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices,
channel_indices, height_width_predictions,
offset_predictions):
"""Converts CenterNet class-center, offset and size predictions to boxes.
Args:
detection_scores: A [batch, num_boxes] float32 tensor with detection
scores in range [0, 1].
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
channel_indices: A [batch, num_boxes] int32 tensor with channel indices
corresponding to object classes.
height_width_predictions: A float tensor of shape [batch_size, height,
width, 2] representing the height and width of a box centered at each
pixel.
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box centered at each pixel. This
helps reduce the error from downsampling.
Returns:
detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the
the raw bounding box coordinates of boxes.
detection_classes: An integer tensor of shape [batch_size, num_boxes]
indicating the predicted class for each box.
detection_scores: A float tensor of shape [batch_size, num_boxes] indicating
the score for each box.
num_detections: An integer tensor of shape [batch_size,] indicating the
number of boxes detected for each sample in the batch.
"""
batch_size, num_boxes = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_boxes),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
new_height_width = tf.gather_nd(height_width_predictions, combined_indices)
new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, -1])
new_offsets = tf.gather_nd(offset_predictions, combined_indices)
offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1])
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
height_width = tf.maximum(new_height_width, 0)
heights, widths = tf.unstack(height_width, axis=2)
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
detection_classes = channel_indices
num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1)
boxes = tf.stack([y_indices + y_offsets - heights / 2.0,
x_indices + x_offsets - widths / 2.0,
y_indices + y_offsets + heights / 2.0,
x_indices + x_offsets + widths / 2.0], axis=2)
return boxes, detection_classes, detection_scores, num_detections
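# Worked example (output-stride coordinates): a peak at (y, x) = (40, 60) with
# predicted offsets (0.3, -0.2) and height/width (10, 16) decodes to the box
# [40.3 - 5, 59.8 - 8, 40.3 + 5, 59.8 + 8] = [35.3, 51.8, 45.3, 67.8].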
def prediction_tensors_to_temporal_offsets(
y_indices, x_indices, offset_predictions):
"""Converts CenterNet temporal offset map predictions to batched format.
This function is similar to the box offset conversion function, as both
temporal offsets and box offsets are size-2 vectors.
Args:
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box's center across adjacent frames.
Returns:
offsets: A tensor of shape [batch_size, num_boxes, 2] holding the
the object temporal offsets of (y, x) dimensions.
"""
batch_size, num_boxes = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_boxes),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
new_offsets = tf.gather_nd(offset_predictions, combined_indices)
offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1])
return offsets
def prediction_tensors_to_keypoint_candidates(
keypoint_heatmap_predictions,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.1,
max_pool_kernel_size=1,
max_candidates=20):
"""Convert keypoint heatmap predictions and offsets to keypoint candidates.
Args:
keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,
width, num_keypoints] representing the per-keypoint heatmaps.
keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,
width, 2] (or [batch_size, height, width, 2 * num_keypoints] if
'per_keypoint_offset' is set True) representing the per-keypoint offsets.
keypoint_score_threshold: float, the threshold for considering a keypoint
a candidate.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood. For example, to make sure no two
neighboring values for the same keypoint are returned, set
max_pool_kernel_size=3. If None or 1, will not apply any local filtering.
max_candidates: integer, maximum number of keypoint candidates per
keypoint type.
Returns:
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] with the number of candidates for each
keypoint type, as it's possible to filter some candidates due to the score
threshold.
"""
batch_size, _, _, num_keypoints = _get_shape(keypoint_heatmap_predictions, 4)
# Get x, y and channel indices corresponding to the top indices in the
# keypoint heatmap predictions.
# Note that the top k candidates are produced for **each keypoint type**.
# Might be worth eventually trying top k in the feature map, independent of
# the keypoint type.
keypoint_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(keypoint_heatmap_predictions,
max_pool_kernel_size=max_pool_kernel_size,
k=max_candidates,
per_channel=True))
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
_, num_indices = _get_shape(y_indices, 2)
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,
combined_indices)
selected_offsets = tf.reshape(selected_offsets_flat,
[batch_size, num_indices, -1])
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
_, _, num_channels = _get_shape(selected_offsets, 3)
if num_channels > 2:
# Offsets are per keypoint and the last dimension of selected_offsets
# contains all those offsets, so reshape the offsets to make sure that the
# last dimension contains (y_offset, x_offset) for a single keypoint.
reshaped_offsets = tf.reshape(selected_offsets,
[batch_size, num_indices, -1, 2])
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that. In this
# case, channel_indices indicates which keypoint to use the offset from.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
_multi_range(num_indices, range_repetitions=batch_size),
tf.reshape(channel_indices, [-1])
], axis=1)
offsets = tf.gather_nd(reshaped_offsets, combined_indices)
offsets = tf.reshape(offsets, [batch_size, num_indices, -1])
else:
offsets = selected_offsets
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
keypoint_candidates = tf.stack([y_indices + y_offsets,
x_indices + x_offsets], axis=2)
keypoint_candidates = tf.reshape(
keypoint_candidates,
[batch_size, num_keypoints, max_candidates, 2])
keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores = tf.reshape(
keypoint_scores,
[batch_size, num_keypoints, max_candidates])
keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])
num_candidates = tf.reduce_sum(
tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1)
return keypoint_candidates, keypoint_scores, num_candidates
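# Shape example: a keypoint heatmap of shape [2, 128, 128, 17] with
# max_candidates=20 yields keypoint_candidates of shape [2, 20, 17, 2],
# keypoint_scores of shape [2, 20, 17] and num_keypoint_candidates of
# shape [2, 17].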
def regressed_keypoints_at_object_centers(regressed_keypoint_predictions,
y_indices, x_indices):
"""Returns the regressed keypoints at specified object centers.
The original keypoint predictions are regressed relative to each feature map
location. The returned keypoints are expressed in absolute coordinates in the
output frame (i.e. the center offsets are added to each individual regressed
set of keypoints).
Args:
regressed_keypoint_predictions: A float tensor of shape
[batch_size, height, width, 2 * num_keypoints] holding regressed
keypoints. The last dimension has keypoint coordinates ordered as follows:
[y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where
regressed keypoints are gathered at the provided locations, and converted
to absolute coordinates in the output coordinate frame.
"""
batch_size, num_instances = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_instances),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
relative_regressed_keypoints = tf.gather_nd(regressed_keypoint_predictions,
combined_indices)
relative_regressed_keypoints = tf.reshape(
relative_regressed_keypoints,
[batch_size, num_instances, -1, 2])
relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack(
relative_regressed_keypoints, axis=3)
y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1))
x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1))
absolute_regressed_keypoints = tf.stack(
[y_indices + relative_regressed_keypoints_y,
x_indices + relative_regressed_keypoints_x],
axis=3)
return tf.reshape(absolute_regressed_keypoints,
[batch_size, num_instances, -1])
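# Illustrative sketch (hypothetical values, not part of the library): with a
# 2x2 feature map, one instance and one keypoint, if
#   regressed_keypoint_predictions[0, 1, 1, :] == [0.5, -0.25]
# and y_indices == [[1]], x_indices == [[1]], the result is
#   [[[1.5, 0.75]]]   # shape [1, 1, 2], i.e. (1 + 0.5, 1 - 0.25)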
def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores,
num_keypoint_candidates, bboxes=None,
unmatched_keypoint_score=0.1, box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance'):
"""Refines regressed keypoints by snapping to the nearest candidate keypoints.
The initial regressed keypoints represent a full set of keypoints regressed
from the centers of the objects. The keypoint candidates are estimated
independently from heatmaps, and are not associated with any object instances.
This function refines the regressed keypoints by "snapping" to the
nearest/highest score/highest score-distance ratio (depending on the
candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose").
If no candidates are nearby, the regressed keypoint remains unchanged.
In order to snap a regressed keypoint to a candidate keypoint, the following
must be satisfied:
- the candidate keypoint must be of the same type as the regressed keypoint
- the candidate keypoint must not lie outside the predicted boxes (or the
boxes which enclose the regressed keypoints for the instance if `bboxes` is
not provided). Note that the box is scaled by `box_scale` in height and
width, to provide some margin around the keypoints
- the distance to the closest candidate keypoint cannot exceed
candidate_search_scale * max(height, width), where height and width refer to
the bounding box for the instance.
Note that the same candidate keypoint is allowed to snap to regressed
keypoints in different instances.
Args:
regressed_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the initial regressed
keypoints.
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the location of
keypoint candidates in [y, x] format (expressed in absolute coordinates in
the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] indicating the number of valid candidates for
each keypoint type, as there may be padding (dim 1) of
`keypoint_candidates` and `keypoint_scores`.
bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
bounding boxes for each instance, expressed in the output coordinate
frame. If not provided, boxes will be computed from regressed keypoints.
unmatched_keypoint_score: float, the default score to use for regressed
keypoints that are not successfully snapped to a nearby candidate.
box_scale: float, the multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints) for
an instance. This scale is typically larger than 1.0 when not providing
`bboxes`.
candidate_search_scale: float, the scale parameter that multiplies the
largest dimension of a bounding box. The resulting distance becomes a
search radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: A string, one of ['min_distance',
'score_distance_ratio'], indicating how to select the candidate. If an
invalid value is provided, a ValueError will be raised.
Returns:
A tuple with:
refined_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the final, refined
keypoints.
refined_scores: A float tensor of shape
[batch_size, num_instances, num_keypoints] with scores associated with all
instances and keypoints in `refined_keypoints`.
Raises:
ValueError: if provided candidate_ranking_mode is not one of
['min_distance', 'score_distance_ratio']
"""
batch_size, num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(regressed_keypoints))
max_candidates = keypoint_candidates.shape[1]
# Replace all invalid (i.e. padded) keypoint candidates with NaN.
# This will prevent them from being considered.
range_tiled = tf.tile(
tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]),
[batch_size, 1, num_keypoints])
num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1),
[1, max_candidates, 1])
invalid_candidates = range_tiled >= num_candidates_tiled
nan_mask = tf.where(
invalid_candidates,
np.nan * tf.ones_like(invalid_candidates, dtype=tf.float32),
tf.ones_like(invalid_candidates, dtype=tf.float32))
keypoint_candidates_with_nans = tf.math.multiply(
keypoint_candidates, tf.expand_dims(nan_mask, -1))
# Pairwise squared distances between regressed keypoints and candidate
# keypoints (for a single keypoint type).
# Shape [batch_size, num_instances, 1, num_keypoints, 2].
regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints,
axis=2)
# Shape [batch_size, 1, max_candidates, num_keypoints, 2].
keypoint_candidates_expanded = tf.expand_dims(
keypoint_candidates_with_nans, axis=1)
# Use explicit tensor shape broadcasting (since the tensor dimensions are
# expanded to 5D) to make it tf.lite compatible.
regressed_keypoint_expanded = tf.tile(
regressed_keypoint_expanded, multiples=[1, 1, max_candidates, 1, 1])
keypoint_candidates_expanded = tf.tile(
keypoint_candidates_expanded, multiples=[1, num_instances, 1, 1, 1])
# Replace tf.math.squared_difference by "-" operator and tf.multiply ops since
# tf.lite convert doesn't support squared_difference with undetermined
# dimension.
diff = regressed_keypoint_expanded - keypoint_candidates_expanded
sqrd_distances = tf.math.reduce_sum(tf.multiply(diff, diff), axis=-1)
distances = tf.math.sqrt(sqrd_distances)
# Determine the candidates that have the minimum distance to the regressed
# keypoints. Shape [batch_size, num_instances, num_keypoints].
min_distances = tf.math.reduce_min(distances, axis=2)
if candidate_ranking_mode == 'min_distance':
nearby_candidate_inds = tf.math.argmin(distances, axis=2)
elif candidate_ranking_mode == 'score_distance_ratio':
# tiled_keypoint_scores:
# Shape [batch_size, num_instances, max_candidates, num_keypoints].
tiled_keypoint_scores = tf.tile(
tf.expand_dims(keypoint_scores, axis=1),
multiples=[1, num_instances, 1, 1])
ranking_scores = tiled_keypoint_scores / (distances + 1e-6)
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
else:
raise ValueError('Not recognized candidate_ranking_mode: %s' %
candidate_ranking_mode)
# Gather the coordinates and scores corresponding to the closest candidates.
# Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and
# [batch_size, num_instances, num_keypoints], respectively.
nearby_candidate_coords, nearby_candidate_scores = (
_gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
nearby_candidate_inds))
if bboxes is None:
# Create bboxes from regressed keypoints.
# Shape [batch_size * num_instances, 4].
regressed_keypoints_flattened = tf.reshape(
regressed_keypoints, [-1, num_keypoints, 2])
bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
regressed_keypoints_flattened)
else:
bboxes_flattened = tf.reshape(bboxes, [-1, 4])
# Scale the bounding boxes.
# Shape [batch_size, num_instances, 4].
boxlist = box_list.BoxList(bboxes_flattened)
boxlist_scaled = box_list_ops.scale_height_width(
boxlist, box_scale, box_scale)
bboxes_scaled = boxlist_scaled.get()
bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4])
# Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint.
# Shape [batch_size, num_instances, num_keypoints].
bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1])
ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3)
# Produce a mask that indicates whether the original regressed keypoint
# should be used instead of a candidate keypoint.
# Shape [batch_size, num_instances, num_keypoints].
search_radius = (
tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale)
mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) +
# Filter out the chosen candidate with score lower than unmatched
# keypoint score.
tf.cast(nearby_candidate_scores <
unmatched_keypoint_score, tf.int32) +
tf.cast(min_distances > search_radius, tf.int32))
mask = mask > 0
# Create refined keypoints where candidate keypoints replace original
# regressed keypoints if they are in the vicinity of the regressed keypoints.
# Shape [batch_size, num_instances, num_keypoints, 2].
refined_keypoints = tf.where(
tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]),
regressed_keypoints,
nearby_candidate_coords)
# Update keypoints scores. In the case where we use the original regressed
# keypoints, we use a default score of `unmatched_keypoint_score`.
# Shape [batch_size, num_instances, num_keypoints].
refined_scores = tf.where(
mask,
unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores),
nearby_candidate_scores)
return refined_keypoints, refined_scores
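# Illustrative sketch (hypothetical shapes, not part of the library): with one
# image, two instances and a single keypoint type, a call could look like
#
#   refined_kpts, refined_scores = refine_keypoints(
#       regressed_keypoints,      # [1, 2, 1, 2]
#       keypoint_candidates,      # [1, max_candidates, 1, 2]
#       keypoint_scores,          # [1, max_candidates, 1]
#       num_keypoint_candidates,  # [1, 1]
#       bboxes=None,
#       candidate_ranking_mode='min_distance')
#
# A regressed keypoint snaps to its selected same-type candidate only if the
# candidate lies inside the scaled box, its score is at least
# `unmatched_keypoint_score`, and it is within
# candidate_search_scale * max(box_height, box_width) of the regressed point;
# otherwise the regressed location is kept with score
# `unmatched_keypoint_score`.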
def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,
num_total_keypoints):
"""Scatter keypoint elements into tensors with full keypoints dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
keypoint_inds: a list of integers that indicate the keypoint indices for
this specific keypoint class. These indices are used to scatter into
tensors that have a `num_total_keypoints` dimension.
num_total_keypoints: The total number of keypoints that this model predicts.
Returns:
A tuple with
keypoint_coords_padded: a
[batch_size, num_instances, num_total_keypoints,2] float32 tensor.
keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]
float32 tensor.
"""
batch_size, num_instances, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])
kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_coords_transposed,
shape=[num_total_keypoints, batch_size, num_instances, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_scores_transposed,
shape=[num_total_keypoints, batch_size, num_instances])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])
return keypoint_coords_padded, keypoint_scores_padded
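# Illustrative sketch (hypothetical values): with keypoint_inds=[1, 3] and
# num_total_keypoints=5, the two input keypoint channels are scattered into
# slots 1 and 3 of the output and slots 0, 2 and 4 are filled with zeros:
#
#   coords_padded, scores_padded = _pad_to_full_keypoint_dim(
#       keypoint_coords,   # [batch_size, num_instances, 2, 2]
#       keypoint_scores,   # [batch_size, num_instances, 2]
#       keypoint_inds=[1, 3],
#       num_total_keypoints=5)
#   # coords_padded: [batch_size, num_instances, 5, 2]
#   # scores_padded: [batch_size, num_instances, 5]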
def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,
max_instances):
"""Scatter keypoint elements into tensors with full instance dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
instance_inds: a list of integers that indicate the instance indices for
these keypoints. These indices are used to scatter into tensors
that have a `max_instances` dimension.
max_instances: The maximum number of instances detected by the model.
Returns:
A tuple with
keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]
float32 tensor.
keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]
float32 tensor.
"""
batch_size, _, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])
instance_inds = tf.expand_dims(instance_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_coords_transposed,
shape=[max_instances, batch_size, num_keypoints, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_scores_transposed,
shape=[max_instances, batch_size, num_keypoints])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])
return keypoint_coords_padded, keypoint_scores_padded
def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
indices):
"""Gathers keypoint candidate coordinates and scores at indices.
Args:
keypoint_candidates: a float tensor of shape [batch_size, max_candidates,
num_keypoints, 2] with candidate coordinates.
keypoint_scores: a float tensor of shape [batch_size, max_candidates,
num_keypoints] with keypoint scores.
indices: an integer tensor of shape [batch_size, num_indices, num_keypoints]
with indices.
Returns:
A tuple with
gathered_keypoint_candidates: a float tensor of shape [batch_size,
num_indices, num_keypoints, 2] with gathered coordinates.
gathered_keypoint_scores: a float tensor of shape [batch_size,
num_indices, num_keypoints] with gathered scores.
"""
batch_size, num_indices, num_keypoints = _get_shape(indices, 3)
# Transpose tensors so that all batch dimensions are up front.
keypoint_candidates_transposed = tf.transpose(keypoint_candidates,
[0, 2, 1, 3])
keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1])
nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1])
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(
batch_size,
value_repetitions=num_keypoints * num_indices,
dtype=tf.int64),
_multi_range(
num_keypoints,
value_repetitions=num_indices,
range_repetitions=batch_size,
dtype=tf.int64),
tf.reshape(nearby_candidate_inds_transposed, [-1])
], axis=1)
nearby_candidate_coords_transposed = tf.gather_nd(
keypoint_candidates_transposed, combined_indices)
nearby_candidate_coords_transposed = tf.reshape(
nearby_candidate_coords_transposed,
[batch_size, num_keypoints, num_indices, -1])
nearby_candidate_scores_transposed = tf.gather_nd(keypoint_scores_transposed,
combined_indices)
nearby_candidate_scores_transposed = tf.reshape(
nearby_candidate_scores_transposed,
[batch_size, num_keypoints, num_indices])
gathered_keypoint_candidates = tf.transpose(
nearby_candidate_coords_transposed, [0, 2, 1, 3])
gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed,
[0, 2, 1])
return gathered_keypoint_candidates, gathered_keypoint_scores
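# Illustrative sketch (hypothetical values): with keypoint_candidates of shape
# [1, 3, 2, 2] (3 candidates, 2 keypoint types) and indices = [[[2, 0]]]
# (shape [1, 1, 2]), the gathered coordinates have shape [1, 1, 2, 2]:
# candidate 2 is selected for keypoint type 0 and candidate 0 for keypoint
# type 1, with the matching scores gathered the same way into a [1, 1, 2]
# tensor.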
def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):
"""Get the index in a flattened array given row and column indices."""
return (row_indices * num_cols) + col_indices
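# E.g. with num_cols=5, row index 2 and column index 3, the flattened index is
# 2 * 5 + 3 = 13.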
def row_col_channel_indices_from_flattened_indices(indices, num_cols,
num_channels):
"""Computes row, column and channel indices from flattened indices.
Args:
indices: An integer tensor of any shape holding the indices in the flattened
space.
num_cols: Number of columns in the image (width).
num_channels: Number of channels in the image.
Returns:
row_indices: The row indices corresponding to each of the input indices.
Same shape as indices.
col_indices: The column indices corresponding to each of the input indices.
Same shape as indices.
channel_indices: The channel indices corresponding to each of the input
indices. Same shape as indices.
"""
row_indices = (indices // num_channels) // num_cols
col_indices = (indices // num_channels) % num_cols
channel_indices = indices % num_channels
return row_indices, col_indices, channel_indices
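# Illustrative sketch (hypothetical values): for a feature map with num_cols=4
# and num_channels=2, flattened indices [5, 10] map back to rows [0, 1],
# cols [2, 1] and channels [1, 0], since
# (row * num_cols + col) * num_channels + channel recovers 5 and 10.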
def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,
width):
"""Computes valid anchor weights for an image assuming pixels will be flattened.
This function is useful when we only want to penalize valid areas in the
image in the case when padding is used. The function assumes that the loss
function will be applied after flattening the spatial dimensions and returns
anchor weights accordingly.
Args:
true_image_shapes: An integer tensor of shape [batch_size, 3] representing
the true image shape (without padding) for each sample in the batch.
height: height of the prediction from the network.
width: width of the prediction from the network.
Returns:
valid_anchor_weights: a float tensor of shape [batch_size, height * width]
with 1s in locations where the spatial coordinates fall within the height
and width in true_image_shapes.
"""
indices = tf.reshape(tf.range(height * width), [1, -1])
batch_size = tf.shape(true_image_shapes)[0]
batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices
y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(
batch_indices, width, 1)
max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]
max_x = _to_float32(tf.expand_dims(max_x, 1))
max_y = _to_float32(tf.expand_dims(max_y, 1))
x_coords = _to_float32(x_coords)
y_coords = _to_float32(y_coords)
valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)
return _to_float32(valid_mask)
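# Illustrative sketch (hypothetical values): with a 3x4 prediction (height=3,
# width=4) and true_image_shapes = [[2, 3, 3]] (true height 2, true width 3
# before padding), the returned weights for that sample are
#   [1, 1, 1, 0,
#    1, 1, 1, 0,
#    0, 0, 0, 0]
# with shape [1, 12], i.e. 1s only where row < 2 and col < 3.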
def convert_strided_predictions_to_normalized_boxes(boxes, stride,
true_image_shapes):
"""Converts predictions in the output space to normalized boxes.
Boxes falling outside the valid image boundary are clipped to be on the
boundary.
Args:
boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
coordinates of boxes in the model's output space.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
Returns:
boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
coordinates of the normalized boxes.
"""
def _normalize_boxlist(args):
boxes, height, width = args
boxes = box_list_ops.scale(boxes, stride, stride)
boxes = box_list_ops.to_normalized_coordinates(boxes, height, width)
boxes = box_list_ops.clip_to_window(boxes, [0., 0., 1., 1.],
filter_nonoverlapping=False)
return boxes
box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)]
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
true_heights_list = tf.unstack(true_heights, axis=0)
true_widths_list = tf.unstack(true_widths, axis=0)
box_lists = list(map(_normalize_boxlist,
zip(box_lists, true_heights_list, true_widths_list)))
boxes = tf.stack([box_list_instance.get() for
box_list_instance in box_lists], axis=0)
return boxes
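# Illustrative sketch (hypothetical values): with stride=4 and a true image
# shape of [256, 192, 3], a raw box [0., 0., 32., 48.] in the output space is
# first scaled to [0., 0., 128., 192.] in input pixels and then normalized to
# [0., 0., 0.5, 1.0] (and clipped to the [0, 1] window if needed).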
def convert_strided_predictions_to_normalized_keypoints(
keypoint_coords, keypoint_scores, stride, true_image_shapes,
clip_out_of_frame_keypoints=False):
"""Converts predictions in the output space to normalized keypoints.
If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside
the valid image boundary are normalized but not clipped; If
clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the
valid image boundary are clipped to the closest image boundary and the scores
will be set to 0.0.
Args:
keypoint_coords: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] holding the raw coordinates
of keypoints in the model's output space.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] holding the keypoint scores.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside
the image boundary should be clipped. If True, keypoint coords will be
clipped to image boundary. If False, keypoints are normalized but not
filtered based on their location.
Returns:
keypoint_coords_normalized: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] representing the coordinates
of the normalized keypoints.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] representing the updated
keypoint scores.
"""
# Flatten keypoints and scores.
batch_size, _, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
# Scale and normalize keypoints.
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
yscale = float(stride) / tf.cast(true_heights, tf.float32)
xscale = float(stride) / tf.cast(true_widths, tf.float32)
yx_scale = tf.stack([yscale, xscale], axis=1)
keypoint_coords_normalized = keypoint_coords * tf.reshape(
yx_scale, [batch_size, 1, 1, 2])
if clip_out_of_frame_keypoints:
# Determine the keypoints that are in the true image regions.
valid_indices = tf.logical_and(
tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0,
keypoint_coords_normalized[:, :, :, 0] <= 1.0),
tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0,
keypoint_coords_normalized[:, :, :, 1] <= 1.0))
batch_window = tf.tile(
tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32),
multiples=[batch_size, 1])
def clip_to_window(inputs):
keypoints, window = inputs
return keypoint_ops.clip_to_window(keypoints, window)
# Specify the TensorSpec explicitly in the tf.map_fn to make it tf.lite
# compatible.
kpts_dims = _get_shape(keypoint_coords_normalized, 4)
output_spec = tf.TensorSpec(
shape=[kpts_dims[1], kpts_dims[2], kpts_dims[3]], dtype=tf.float32)
keypoint_coords_normalized = tf.map_fn(
clip_to_window, (keypoint_coords_normalized, batch_window),
dtype=tf.float32, back_prop=False,
fn_output_signature=output_spec)
keypoint_scores = tf.where(valid_indices, keypoint_scores,
tf.zeros_like(keypoint_scores))
return keypoint_coords_normalized, keypoint_scores
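# Illustrative sketch (hypothetical values): with stride=4 and a true image
# shape of [256, 192, 3], an output-space keypoint at (y=32, x=24) is
# normalized to (32 * 4 / 256, 24 * 4 / 192) = (0.5, 0.5). With
# clip_out_of_frame_keypoints=True, keypoints outside [0, 1] are clipped to
# the boundary and their scores are zeroed.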
def convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap=None, densepose_surface_coords=None, stride=4,
mask_height=256, mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Converts predicted full-image masks into instance masks.
For each predicted detection box:
* Crop and resize the predicted mask (and optionally DensePose coordinates)
based on the detected bounding box coordinates and class prediction. Uses
bilinear resampling.
* Binarize the mask using the provided score threshold.
Args:
boxes: A tensor of shape [batch, max_detections, 4] holding the predicted
boxes, in normalized coordinates (relative to the true image dimensions).
classes: An integer tensor of shape [batch, max_detections] containing the
detected class for each box (0-indexed).
masks: A [batch, output_height, output_width, num_classes] float32
tensor with class probabilities.
true_image_shapes: A tensor of shape [batch, 3] representing the true
shape of the inputs not considering padding.
densepose_part_heatmap: (Optional) A [batch, output_height, output_width,
num_parts] float32 tensor with part scores (i.e. logits).
densepose_surface_coords: (Optional) A [batch, output_height, output_width,
2 * num_parts] float32 tensor with predicted part coordinates (in
vu-format).
stride: The stride in the output space.
mask_height: The desired resized height for instance masks.
mask_width: The desired resized width for instance masks.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: The class index (0-indexed) corresponding to the
class which has DensePose labels (e.g. person class).
Returns:
A tuple of masks and surface_coords.
instance_masks: A [batch_size, max_detections, mask_height, mask_width]
uint8 tensor with predicted foreground mask for each
instance. If DensePose tensors are provided, then each pixel value in the
mask encodes the 1-indexed part.
surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates. Note that v, u coordinates are
only defined on instance masks, and the coordinates at each location of
the foreground mask correspond to coordinates on a local part coordinate
system (the specific part can be inferred from the `instance_masks`
output). If DensePose feature maps are not passed to this function, this
output will be None.
Raises:
ValueError: If one but not both of `densepose_part_heatmap` and
`densepose_surface_coords` is provided.
"""
batch_size, output_height, output_width, _ = (
shape_utils.combined_static_and_dynamic_shape(masks))
input_height = stride * output_height
input_width = stride * output_width
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
# If necessary, create dummy DensePose tensors to simplify the map function.
densepose_present = True
if ((densepose_part_heatmap is not None) ^
(densepose_surface_coords is not None)):
raise ValueError('To use DensePose, both `densepose_part_heatmap` and '
'`densepose_surface_coords` must be provided')
if densepose_part_heatmap is None and densepose_surface_coords is None:
densepose_present = False
densepose_part_heatmap = tf.zeros(
(batch_size, output_height, output_width, 1), dtype=tf.float32)
densepose_surface_coords = tf.zeros(
(batch_size, output_height, output_width, 2), dtype=tf.float32)
crop_and_threshold_fn = functools.partial(
crop_and_threshold_masks, input_height=input_height,
input_width=input_width, mask_height=mask_height, mask_width=mask_width,
score_threshold=score_threshold,
densepose_class_index=densepose_class_index)
instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn(
crop_and_threshold_fn,
elems=[boxes, classes, masks, densepose_part_heatmap,
densepose_surface_coords, true_heights, true_widths],
dtype=[tf.uint8, tf.float32],
back_prop=False)
surface_coords = surface_coords if densepose_present else None
return instance_masks, surface_coords
def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256,
mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Crops and thresholds masks based on detection boxes.
Args:
elems: A tuple of
boxes - float32 tensor of shape [max_detections, 4]
classes - int32 tensor of shape [max_detections] (0-indexed)
masks - float32 tensor of shape [output_height, output_width, num_classes]
part_heatmap - float32 tensor of shape [output_height, output_width,
num_parts]
surf_coords - float32 tensor of shape [output_height, output_width,
2 * num_parts]
true_height - scalar int tensor
true_width - scalar int tensor
input_height: Input height to network.
input_width: Input width to network.
mask_height: Height for resizing mask crops.
mask_width: Width for resizing mask crops.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: scalar int tensor with the class index (0-indexed)
for DensePose.
Returns:
A tuple of
all_instances: A [max_detections, mask_height, mask_width] uint8 tensor
with a predicted foreground mask for each instance. Background is encoded
as 0, and foreground is encoded as a positive integer. Specific part
indices are encoded as 1-indexed parts (for classes that have part
information).
surface_coords: A [max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates for each part.
"""
(boxes, classes, masks, part_heatmap, surf_coords, true_height,
true_width) = elems
# Boxes are in normalized coordinates relative to true image shapes. Convert
# coordinates to be normalized relative to input image shapes (since masks
# may still have padding).
boxlist = box_list.BoxList(boxes)
y_scale = true_height / input_height
x_scale = true_width / input_width
boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)
boxes = boxlist.get()
# Convert masks from [output_height, output_width, num_classes] to
# [num_classes, output_height, output_width, 1].
num_classes = tf.shape(masks)[-1]
masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]
# Tile part and surface coordinate masks for all classes.
part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d],
axis=-1)
# The following tensor has shape
# [max_detections, mask_height, mask_width, 1 + 3 * num_parts].
cropped_masks = tf2.image.crop_and_resize(
feature_maps_concat,
boxes=boxes,
box_indices=classes,
crop_size=[mask_height, mask_width],
method='bilinear')
# Split the cropped masks back into instance masks, part masks, and surface
# coordinates.
num_parts = tf.shape(part_heatmap)[-1]
instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split(
cropped_masks, [1, num_parts, 2 * num_parts], axis=-1)
# Threshold the instance masks. Resulting tensor has shape
# [max_detections, mask_height, mask_width, 1].
instance_masks_int = tf.cast(
tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)
# Produce a binary mask that is 1.0 only:
# - in the foreground region for an instance
# - in detections corresponding to the DensePose class
det_with_parts = tf.equal(classes, densepose_class_index)
det_with_parts = tf.cast(
tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32)
instance_masks_with_parts = tf.math.multiply(instance_masks_int,
det_with_parts)
# Similarly, produce a binary mask that holds the foreground masks only for
# instances without parts (i.e. non-DensePose classes).
det_without_parts = 1 - det_with_parts
instance_masks_without_parts = tf.math.multiply(instance_masks_int,
det_without_parts)
# Assemble a tensor that has standard instance segmentation masks for
# non-DensePose classes (with values in [0, 1]), and part segmentation masks
for DensePose classes (with values in [0, 1, ..., num_parts]).
part_mask_int_zero_indexed = tf.math.argmax(
part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis]
part_mask_int_one_indexed = part_mask_int_zero_indexed + 1
all_instances = (instance_masks_without_parts +
instance_masks_with_parts * part_mask_int_one_indexed)
# Gather the surface coordinates for the parts.
surface_coords_cropped = tf.reshape(
surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2])
surface_coords = gather_surface_coords_for_parts(surface_coords_cropped,
part_mask_int_zero_indexed)
surface_coords = (
surface_coords * tf.cast(instance_masks_with_parts, tf.float32))
return [tf.squeeze(all_instances, axis=3), surface_coords]
def gather_surface_coords_for_parts(surface_coords_cropped,
highest_scoring_part):
"""Gathers the (v, u) coordinates for the highest scoring DensePose parts.
Args:
surface_coords_cropped: A [max_detections, height, width, num_parts, 2]
float32 tensor with (v, u) surface coordinates.
highest_scoring_part: A [max_detections, height, width] integer tensor with
the highest scoring part (0-indexed) indices for each location.
Returns:
A [max_detections, height, width, 2] float32 tensor with the (v, u)
coordinates selected from the highest scoring parts.
"""
max_detections, height, width, num_parts, _ = (
shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped))
flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2])
flattened_part_ids = tf.reshape(highest_scoring_part, [-1])
# Produce lookup indices that represent the locations of the highest scoring
# parts in the `flattened_surface_coords` tensor.
flattened_lookup_indices = (
num_parts * tf.range(max_detections * height * width) +
flattened_part_ids)
vu_coords_flattened = tf.gather(flattened_surface_coords,
flattened_lookup_indices, axis=0)
return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])
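# For reference: after flattening, the (v, u) pair for detection m, pixel
# (h, w) and part p lives at row ((m * height + h) * width + w) * num_parts + p
# of `flattened_surface_coords`, which is exactly
# num_parts * flattened_pixel_index + part_id as computed above.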
def predicted_embeddings_at_object_centers(embedding_predictions,
y_indices, x_indices):
"""Returns the predicted embeddings at specified object centers.
Args:
embedding_predictions: A float tensor of shape [batch_size, height, width,
reid_embed_size] holding predicted embeddings.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, reid_embed_size] where
predicted embeddings are gathered at the provided locations.
"""
batch_size, _, width, _ = _get_shape(embedding_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
_, num_instances = _get_shape(flattened_indices, 2)
embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)
embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)
embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1])
return embeddings
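# Illustrative sketch (hypothetical values): with embedding_predictions of
# shape [1, 4, 4, 8], y_indices=[[1]] and x_indices=[[2]], the flattened index
# is 1 * 4 + 2 = 6, and the output is embedding_predictions[0, 1, 2, :]
# reshaped to [1, 1, 8].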
class ObjectDetectionParams(
collections.namedtuple('ObjectDetectionParams', [
'localization_loss', 'scale_loss_weight', 'offset_loss_weight',
'task_loss_weight'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the object detection task. The class is
immutable after construction. Please see the __new__ function for detailed
information for each field.
"""
__slots__ = ()
def __new__(cls,
localization_loss,
scale_loss_weight,
offset_loss_weight,
task_loss_weight=1.0):
"""Constructor with default values for ObjectDetectionParams.
Args:
localization_loss: a object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
scale_loss_weight: float, The weight for localizing box size. Note that
the scale loss is dependent on the input image size, since we penalize
the raw height and width. This constant may need to be adjusted
depending on the input size.
offset_loss_weight: float, The weight for localizing center offsets.
task_loss_weight: float, the weight of the object detection loss.
Returns:
An initialized ObjectDetectionParams namedtuple.
"""
return super(ObjectDetectionParams,
cls).__new__(cls, localization_loss, scale_loss_weight,
offset_loss_weight, task_loss_weight)
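# Illustrative sketch (not part of the library; the loss class below is
# assumed to be available in object_detection.core.losses and may differ in
# your setup):
#
#   od_params = ObjectDetectionParams(
#       localization_loss=losses.L1LocalizationLoss(),
#       scale_loss_weight=0.1,
#       offset_loss_weight=1.0,
#       task_loss_weight=1.0)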
class KeypointEstimationParams(
collections.namedtuple('KeypointEstimationParams', [
'task_name', 'class_id', 'keypoint_indices', 'classification_loss',
'localization_loss', 'keypoint_labels', 'keypoint_std_dev',
'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',
'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold',
'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight',
'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',
'candidate_search_scale', 'candidate_ranking_mode',
'offset_peak_radius', 'per_keypoint_offset'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the keypoint estimation task. The class is
immutable after construction. Please see the __new__ function for detailed
information for each field.
"""
__slots__ = ()
def __new__(cls,
task_name,
class_id,
keypoint_indices,
classification_loss,
localization_loss,
keypoint_labels=None,
keypoint_std_dev=None,
keypoint_heatmap_loss_weight=1.0,
keypoint_offset_loss_weight=1.0,
keypoint_regression_loss_weight=1.0,
keypoint_candidate_score_threshold=0.1,
heatmap_bias_init=-2.19,
num_candidates_per_keypoint=100,
task_loss_weight=1.0,
peak_max_pool_kernel_size=3,
unmatched_keypoint_score=0.1,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance',
offset_peak_radius=0,
per_keypoint_offset=False):
"""Constructor with default values for KeypointEstimationParams.
Args:
task_name: string, the name of the task this namedtuple corresponds to.
Note that it should be a unique identifier of the task.
class_id: int, the ID of the class that contains the target keypoints to
considered in this task. For example, if the task is human pose
estimation, the class id should correspond to the "human" class. Note
that the ID is 0-based, meaning that class 0 corresponds to the first
non-background object class.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
keypoint_labels: A list of strings representing the label text of each
keypoint, e.g. "nose", 'left_shoulder". Note that the length of this
list should be equal to keypoint_indices.
keypoint_std_dev: A list of floats representing the standard deviation of
the Gaussian kernel used to generate the keypoint heatmap. This provides
the flexibility of using a different Gaussian kernel size for each
keypoint class.
keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.
keypoint_offset_loss_weight: float, The weight for the keypoint offsets
loss.
keypoint_regression_loss_weight: float, The weight for keypoint regression
loss. Note that the loss is dependent on the input image size, since we
penalize the raw height and width. This constant may need to be adjusted
depending on the input size.
keypoint_candidate_score_threshold: float, The heatmap score threshold for
a keypoint to become a valid candidate.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the class prediction head. If set to None, the bias is
initialized with zeros.
num_candidates_per_keypoint: The maximum number of candidates to retrieve
for each keypoint.
task_loss_weight: float, the weight of the keypoint estimation loss.
peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
score locations in a neighborhood (independently for each keypoint
types).
unmatched_keypoint_score: The default score to use for regressed keypoints
that are not successfully snapped to a nearby candidate.
box_scale: The multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints).
candidate_search_scale: The scale parameter that multiplies the largest
dimension of a bounding box. The resulting distance becomes a search
radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio']
indicating how to select the keypoint candidate.
offset_peak_radius: The radius (in the unit of output pixel) around
groundtruth heatmap peak to assign the offset targets. If set to 0, then
the offset target will only be assigned to the heatmap peak (same
behavior as the original paper).
per_keypoint_offset: A bool indicating whether to assign offsets for each
keypoint channel separately. If set to False, the output offset target has
the shape [batch_size, out_height, out_width, 2] (same behavior as the
original paper). If set to True, the output offset target has the shape
[batch_size, out_height, out_width, 2 * num_keypoints] (recommended when
the offset_peak_radius is not zero).
Returns:
An initialized KeypointEstimationParams namedtuple.
"""
return super(KeypointEstimationParams, cls).__new__(
cls, task_name, class_id, keypoint_indices, classification_loss,
localization_loss, keypoint_labels, keypoint_std_dev,
keypoint_heatmap_loss_weight, keypoint_offset_loss_weight,
keypoint_regression_loss_weight, keypoint_candidate_score_threshold,
heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight,
peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale,
candidate_search_scale, candidate_ranking_mode, offset_peak_radius,
per_keypoint_offset)
class ObjectCenterParams(
collections.namedtuple('ObjectCenterParams', [
'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init',
'min_box_overlap_iou', 'max_box_predictions', 'use_only_known_classes'
])):
"""Namedtuple to store object center prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
object_center_loss_weight,
heatmap_bias_init=-2.19,
min_box_overlap_iou=0.7,
max_box_predictions=100,
use_labeled_classes=False):
"""Constructor with default values for ObjectCenterParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
object_center_loss_weight: float, The weight for the object center loss.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the object center prediction head. If set to None, the bias is
initialized with zeros.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
need to have with groundtruth boxes to not be penalized. This is used for
computing the class specific center heatmaps.
max_box_predictions: int, the maximum number of boxes to predict.
use_labeled_classes: boolean, compute the loss only for labeled classes.
Returns:
An initialized ObjectCenterParams namedtuple.
"""
return super(ObjectCenterParams,
cls).__new__(cls, classification_loss,
object_center_loss_weight, heatmap_bias_init,
min_box_overlap_iou, max_box_predictions,
use_labeled_classes)
class MaskParams(
collections.namedtuple('MaskParams', [
'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width',
'score_threshold', 'heatmap_bias_init'
])):
"""Namedtuple to store mask prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
task_loss_weight=1.0,
mask_height=256,
mask_width=256,
score_threshold=0.5,
heatmap_bias_init=-2.19):
"""Constructor with default values for MaskParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the semantic segmentation predictions in CenterNet.
task_loss_weight: float, The loss weight for the segmentation task.
mask_height: The height of the resized instance segmentation mask.
mask_width: The width of the resized instance segmentation mask.
score_threshold: The threshold at which to convert predicted mask
probabilities (after passing through sigmoid) into foreground pixels.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the semantic segmentation prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized MaskParams namedtuple.
"""
return super(MaskParams,
cls).__new__(cls, classification_loss,
task_loss_weight, mask_height, mask_width,
score_threshold, heatmap_bias_init)
class DensePoseParams(
collections.namedtuple('DensePoseParams', [
'class_id', 'classification_loss', 'localization_loss',
'part_loss_weight', 'coordinate_loss_weight', 'num_parts',
'task_loss_weight', 'upsample_to_input_res', 'upsample_method',
'heatmap_bias_init'
])):
"""Namedtuple to store DensePose prediction related parameters."""
__slots__ = ()
def __new__(cls,
class_id,
classification_loss,
localization_loss,
part_loss_weight=1.0,
coordinate_loss_weight=1.0,
num_parts=24,
task_loss_weight=1.0,
upsample_to_input_res=True,
upsample_method='bilinear',
heatmap_bias_init=-2.19):
"""Constructor with default values for DensePoseParams.
Args:
class_id: the ID of the class that contains the DensePose groundtruth.
This should typically correspond to the "person" class. Note that the ID
is 0-based, meaning that class 0 corresponds to the first non-background
object class.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the body part predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the surface coordinate regression in CenterNet.
part_loss_weight: The loss weight to apply to part prediction.
coordinate_loss_weight: The loss weight to apply to surface coordinate
prediction.
num_parts: The number of DensePose parts to predict.
task_loss_weight: float, the loss weight for the DensePose task.
upsample_to_input_res: Whether to upsample the DensePose feature maps to
the input resolution before applying loss. Note that the prediction
outputs are still at the standard CenterNet output stride.
upsample_method: Method for upsampling DensePose feature maps. Options are
either 'bilinear' or 'nearest'. This has no effect when
`upsample_to_input_res` is False.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the part prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized DensePoseParams namedtuple.
"""
return super(DensePoseParams,
cls).__new__(cls, class_id, classification_loss,
localization_loss, part_loss_weight,
coordinate_loss_weight, num_parts,
task_loss_weight, upsample_to_input_res,
upsample_method, heatmap_bias_init)
class TrackParams(
collections.namedtuple('TrackParams', [
'num_track_ids', 'reid_embed_size', 'num_fc_layers',
'classification_loss', 'task_loss_weight'
])):
"""Namedtuple to store tracking prediction related parameters."""
__slots__ = ()
def __new__(cls,
num_track_ids,
reid_embed_size,
num_fc_layers,
classification_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
num_track_ids: int. The maximum track ID in the dataset. Used for ReID
embedding classification task.
reid_embed_size: int. The embedding size for ReID task.
num_fc_layers: int. The number of (fully-connected, batch-norm, relu)
layers for track ID classification head.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the ReID embedding in CenterNet.
task_loss_weight: float, the loss weight for the tracking task.
Returns:
An initialized TrackParams namedtuple.
"""
return super(TrackParams,
cls).__new__(cls, num_track_ids, reid_embed_size,
num_fc_layers, classification_loss,
task_loss_weight)
class TemporalOffsetParams(
collections.namedtuple('TemporalOffsetParams', [
'localization_loss', 'task_loss_weight'
])):
"""Namedtuple to store temporal offset related parameters."""
__slots__ = ()
def __new__(cls,
localization_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
localization_loss: an object_detection.core.losses.Loss object to
compute the loss for the temporal offset in CenterNet.
task_loss_weight: float, the loss weight for the temporal offset
task.
Returns:
An initialized TemporalOffsetParams namedtuple.
"""
return super(TemporalOffsetParams,
cls).__new__(cls, localization_loss, task_loss_weight)
# The following constants are used to generate the keys of the
# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch
# class.
DETECTION_TASK = 'detection_task'
OBJECT_CENTER = 'object_center'
BOX_SCALE = 'box/scale'
BOX_OFFSET = 'box/offset'
KEYPOINT_REGRESSION = 'keypoint/regression'
KEYPOINT_HEATMAP = 'keypoint/heatmap'
KEYPOINT_OFFSET = 'keypoint/offset'
SEGMENTATION_TASK = 'segmentation_task'
SEGMENTATION_HEATMAP = 'segmentation/heatmap'
DENSEPOSE_TASK = 'densepose_task'
DENSEPOSE_HEATMAP = 'densepose/heatmap'
DENSEPOSE_REGRESSION = 'densepose/regression'
LOSS_KEY_PREFIX = 'Loss'
TRACK_TASK = 'track_task'
TRACK_REID = 'track/reid'
TEMPORALOFFSET_TASK = 'temporal_offset_task'
TEMPORAL_OFFSET = 'track/offset'
def get_keypoint_name(task_name, head_name):
return '%s/%s' % (task_name, head_name)
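# E.g. (with a hypothetical task name) get_keypoint_name('human_pose',
# KEYPOINT_HEATMAP) returns 'human_pose/keypoint/heatmap'.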
def get_num_instances_from_weights(groundtruth_weights_list):
"""Computes the number of instances/boxes from the weights in a batch.
Args:
groundtruth_weights_list: A list of float tensors with shape
[max_num_instances] representing whether there is an actual instance in
the image (with non-zero value) or is padded to match the
max_num_instances (with value 0.0). The list represents the batch
dimension.
Returns:
A scalar integer tensor indicating how many instances/boxes are in the
images in the batch. Note that this function is usually used to normalize
the loss, so the minimum return value is 1 to avoid division by zero.
"""
num_instances = tf.reduce_sum(
[tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)
return num_instances
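# Illustrative sketch (hypothetical values): for a batch of two images with
# weights [1.0, 1.0, 0.0] and [0.0, 0.0, 0.0], the non-zero counts are 2 and 0,
# so the function returns 2; for a batch with no instances at all it returns 1.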
class CenterNetMetaArch(model.DetectionModel):
"""The CenterNet meta architecture [1].
[1]: https://arxiv.org/abs/1904.07850
"""
def __init__(self,
is_training,
add_summaries,
num_classes,
feature_extractor,
image_resizer_fn,
object_center_params,
object_detection_params=None,
keypoint_params_dict=None,
mask_params=None,
densepose_params=None,
track_params=None,
temporal_offset_params=None,
use_depthwise=False):
"""Initializes a CenterNet model.
Args:
is_training: Set to True if this model is being built for training.
add_summaries: Whether to add tf summaries in the model.
num_classes: int, The number of classes that the model should predict.
feature_extractor: A CenterNetFeatureExtractor to use to extract features
from an image.
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions and
a 1-D tensor of shape [3] indicating shape of true image within the
resized image tensor as the resized image tensor could be padded. See
builders/image_resizer_builder.py.
object_center_params: An ObjectCenterParams namedtuple. This object holds
the hyper-parameters for object center prediction. This is required by
either object detection or keypoint estimation tasks.
object_detection_params: An ObjectDetectionParams namedtuple. This object
holds the hyper-parameters necessary for object detection. Please see
the class definition for more details.
keypoint_params_dict: A dictionary that maps from task name to the
corresponding KeypointEstimationParams namedtuple. This object holds the
hyper-parameters necessary for multiple keypoint estimations. Please
see the class definition for more details.
mask_params: A MaskParams namedtuple. This object
holds the hyper-parameters for segmentation. Please see the class
definition for more details.
densepose_params: A DensePoseParams namedtuple. This object holds the
hyper-parameters for DensePose prediction. Please see the class
definition for more details. Note that if this is provided, it is
expected that `mask_params` is also provided.
track_params: A TrackParams namedtuple. This object
holds the hyper-parameters for tracking. Please see the class
definition for more details.
temporal_offset_params: A TemporalOffsetParams namedtuple. This object
holds the hyper-parameters for offset prediction based tracking.
use_depthwise: If true, all task heads will be constructed using
separable_conv. Otherwise, standard convolutions will be used.
"""
assert object_detection_params or keypoint_params_dict
# Shorten the name for convenience and better formatting.
self._is_training = is_training
# The Objects as Points paper attaches loss functions to multiple
# (`num_feature_outputs`) feature maps in the backbone. E.g.
# for the hourglass backbone, `num_feature_outputs` is 2.
self._feature_extractor = feature_extractor
self._num_feature_outputs = feature_extractor.num_feature_outputs
self._stride = self._feature_extractor.out_stride
self._image_resizer_fn = image_resizer_fn
self._center_params = object_center_params
self._od_params = object_detection_params
self._kp_params_dict = keypoint_params_dict
self._mask_params = mask_params
if densepose_params is not None and mask_params is None:
raise ValueError('To run DensePose prediction, `mask_params` must also '
'be supplied.')
self._densepose_params = densepose_params
self._track_params = track_params
self._temporal_offset_params = temporal_offset_params
self._use_depthwise = use_depthwise
# Construct the prediction head nets.
self._prediction_head_dict = self._construct_prediction_heads(
num_classes,
self._num_feature_outputs,
class_prediction_bias_init=self._center_params.heatmap_bias_init)
# Initialize the target assigners.
self._target_assigner_dict = self._initialize_target_assigners(
stride=self._stride,
min_box_overlap_iou=self._center_params.min_box_overlap_iou)
# Will be used in VOD single_frame_meta_arch for tensor reshape.
self._batched_prediction_tensor_names = []
super(CenterNetMetaArch, self).__init__(num_classes)
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
def _construct_prediction_heads(self, num_classes, num_feature_outputs,
class_prediction_bias_init):
"""Constructs the prediction heads based on the specific parameters.
Args:
num_classes: An integer indicating how many classes in total to predict.
num_feature_outputs: An integer indicating how many feature outputs to use
for calculating the loss. The Objects as Points paper attaches loss
functions to multiple (`num_feature_outputs`) feature maps in the
backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.
class_prediction_bias_init: float, the initial value of bias in the
convolutional kernel of the class prediction head. If set to None, the
bias is initialized with zeros.
Returns:
A dictionary of keras modules generated by calling make_prediction_net
function. It will also create and set a private member of the class when
learning the tracking task.
"""
prediction_heads = {}
prediction_heads[OBJECT_CENTER] = [
make_prediction_net(num_classes, bias_fill=class_prediction_bias_init,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
if self._od_params is not None:
prediction_heads[BOX_SCALE] = [
make_prediction_net(
NUM_SIZE_CHANNELS, use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
prediction_heads[BOX_OFFSET] = [
make_prediction_net(
NUM_OFFSET_CHANNELS, use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
num_keypoints = len(kp_params.keypoint_indices)
# pylint: disable=g-complex-comprehension
prediction_heads[get_keypoint_name(task_name, KEYPOINT_HEATMAP)] = [
make_prediction_net(
num_keypoints,
bias_fill=kp_params.heatmap_bias_init,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
# pylint: enable=g-complex-comprehension
prediction_heads[get_keypoint_name(task_name, KEYPOINT_REGRESSION)] = [
make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
if kp_params.per_keypoint_offset:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [
make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
else:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [
make_prediction_net(NUM_OFFSET_CHANNELS,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
# pylint: disable=g-complex-comprehension
if self._mask_params is not None:
prediction_heads[SEGMENTATION_HEATMAP] = [
make_prediction_net(
num_classes,
bias_fill=self._mask_params.heatmap_bias_init,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)]
if self._densepose_params is not None:
prediction_heads[DENSEPOSE_HEATMAP] = [
make_prediction_net(
self._densepose_params.num_parts,
bias_fill=self._densepose_params.heatmap_bias_init,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)]
prediction_heads[DENSEPOSE_REGRESSION] = [
make_prediction_net(2 * self._densepose_params.num_parts,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
# pylint: enable=g-complex-comprehension
if self._track_params is not None:
prediction_heads[TRACK_REID] = [
make_prediction_net(self._track_params.reid_embed_size,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)]
# Creates a classification network to train object embeddings by learning
# a projection from embedding space to object track ID space.
self.track_reid_classification_net = tf.keras.Sequential()
for _ in range(self._track_params.num_fc_layers - 1):
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.reid_embed_size,
input_shape=(
self._track_params.reid_embed_size,)))
self.track_reid_classification_net.add(
tf.keras.layers.BatchNormalization())
self.track_reid_classification_net.add(tf.keras.layers.ReLU())
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.num_track_ids,
input_shape=(
self._track_params.reid_embed_size,)))
if self._temporal_offset_params is not None:
prediction_heads[TEMPORAL_OFFSET] = [
make_prediction_net(NUM_OFFSET_CHANNELS,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
return prediction_heads
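  # Illustrative note (an assumed example configuration, not taken from the
  # original source): with the hourglass backbone (num_feature_outputs == 2) and
  # only the detection task enabled, the returned dictionary has the form
  #   {OBJECT_CENTER: [head_0, head_1],
  #    BOX_SCALE: [head_0, head_1],
  #    BOX_OFFSET: [head_0, head_1]}
  # where each head maps one backbone feature map to the channel counts
  # documented above (num_classes, 2, and 2 respectively).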
def _initialize_target_assigners(self, stride, min_box_overlap_iou):
"""Initializes the target assigners and puts them in a dictionary.
Args:
stride: An integer indicating the stride of the image.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used
        for computing the class specific center heatmaps.
Returns:
A dictionary of initialized target assigners for each task.
"""
target_assigners = {}
target_assigners[OBJECT_CENTER] = (
cn_assigner.CenterNetCenterHeatmapTargetAssigner(
stride, min_box_overlap_iou))
if self._od_params is not None:
target_assigners[DETECTION_TASK] = (
cn_assigner.CenterNetBoxTargetAssigner(stride))
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
target_assigners[task_name] = (
cn_assigner.CenterNetKeypointTargetAssigner(
stride=stride,
class_id=kp_params.class_id,
keypoint_indices=kp_params.keypoint_indices,
keypoint_std_dev=kp_params.keypoint_std_dev,
peak_radius=kp_params.offset_peak_radius,
per_keypoint_offset=kp_params.per_keypoint_offset))
if self._mask_params is not None:
target_assigners[SEGMENTATION_TASK] = (
cn_assigner.CenterNetMaskTargetAssigner(stride))
if self._densepose_params is not None:
dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride
target_assigners[DENSEPOSE_TASK] = (
cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride))
if self._track_params is not None:
target_assigners[TRACK_TASK] = (
cn_assigner.CenterNetTrackTargetAssigner(
stride, self._track_params.num_track_ids))
if self._temporal_offset_params is not None:
target_assigners[TEMPORALOFFSET_TASK] = (
cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride))
return target_assigners
def _compute_object_center_loss(self, input_height, input_width,
object_center_predictions, per_pixel_weights):
"""Computes the object center loss.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_center_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_classes] representing the object center
feature maps.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the object center loss per instance.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
if self._center_params.use_only_known_classes:
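      # Broadcasting the per-image labeled-class indicator
      # ([batch, 1, num_classes]) against the spatial weights
      # ([batch, out_h * out_w, 1]) zeroes out the heatmap columns of classes
      # that were not annotated in an image, so they do not contribute to the
      # center loss.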
gt_labeled_classes_list = self.groundtruth_lists(
fields.InputDataFields.groundtruth_labeled_classes)
batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)
batch_labeled_classes_shape = tf.shape(batch_labeled_classes)
batch_labeled_classes = tf.reshape(
batch_labeled_classes,
[batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]])
per_pixel_weights = per_pixel_weights * batch_labeled_classes
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[OBJECT_CENTER]
heatmap_targets = assigner.assign_center_targets_from_boxes(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
loss = 0.0
object_center_loss = self._center_params.classification_loss
# Loop through each feature output head.
for pred in object_center_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += object_center_loss(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_center_predictions)) * num_boxes)
return loss_per_instance
def _compute_object_detection_losses(self, input_height, input_width,
prediction_dict, per_pixel_weights):
"""Computes the weighted object detection losses.
This wrapper function calls the function which computes the losses for
object detection task and applies corresponding weights to the losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by
"predict" function. See "predict" function for more detailed
description.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
object detection task:
BOX_SCALE: the weighted scale (height/width) loss.
BOX_OFFSET: the weighted object offset loss.
"""
od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss(
scale_predictions=prediction_dict[BOX_SCALE],
offset_predictions=prediction_dict[BOX_OFFSET],
input_height=input_height,
input_width=input_width)
loss_dict = {}
loss_dict[BOX_SCALE] = (
self._od_params.scale_loss_weight * od_scale_loss)
loss_dict[BOX_OFFSET] = (
self._od_params.offset_loss_weight * od_offset_loss)
return loss_dict
def _compute_box_scale_and_offset_loss(self, input_height, input_width,
scale_predictions, offset_predictions):
"""Computes the scale loss of the object detection task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
scale_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
        for object scale (i.e. height and width).
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object offset.
Returns:
A tuple of two losses:
scale_loss: A float scalar tensor representing the object height/width
loss normalized by total number of boxes.
offset_loss: A float scalar tensor representing the object offset loss
        normalized by total number of boxes.
"""
# TODO(vighneshb) Explore a size invariant version of scale loss.
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
num_predictions = float(len(scale_predictions))
assigner = self._target_assigner_dict[DETECTION_TASK]
(batch_indices, batch_height_width_targets, batch_offset_targets,
batch_weights) = assigner.assign_size_and_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
scale_loss = 0
offset_loss = 0
localization_loss_fn = self._od_params.localization_loss
for scale_pred, offset_pred in zip(scale_predictions, offset_predictions):
# Compute the scale loss.
scale_pred = cn_assigner.get_batch_predictions_from_indices(
scale_pred, batch_indices)
scale_loss += localization_loss_fn(
scale_pred, batch_height_width_targets, weights=batch_weights)
# Compute the offset loss.
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += localization_loss_fn(
offset_pred, batch_offset_targets, weights=batch_weights)
scale_loss = tf.reduce_sum(scale_loss) / (
num_predictions * num_boxes)
offset_loss = tf.reduce_sum(offset_loss) / (
num_predictions * num_boxes)
return scale_loss, offset_loss
def _compute_keypoint_estimation_losses(self, task_name, input_height,
input_width, prediction_dict,
per_pixel_weights):
"""Computes the weighted keypoint losses."""
kp_params = self._kp_params_dict[task_name]
heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)
offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)
regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)
heatmap_loss = self._compute_kp_heatmap_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
heatmap_predictions=prediction_dict[heatmap_key],
classification_loss_fn=kp_params.classification_loss,
per_pixel_weights=per_pixel_weights)
offset_loss = self._compute_kp_offset_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
offset_predictions=prediction_dict[offset_key],
localization_loss_fn=kp_params.localization_loss)
reg_loss = self._compute_kp_regression_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
regression_predictions=prediction_dict[regression_key],
localization_loss_fn=kp_params.localization_loss)
loss_dict = {}
loss_dict[heatmap_key] = (
kp_params.keypoint_heatmap_loss_weight * heatmap_loss)
loss_dict[offset_key] = (
kp_params.keypoint_offset_loss_weight * offset_loss)
loss_dict[regression_key] = (
kp_params.keypoint_regression_loss_weight * reg_loss)
return loss_dict
def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,
heatmap_predictions, classification_loss_fn,
per_pixel_weights):
"""Computes the heatmap loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
heatmap_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_keypoints] representing the prediction heads
of the model for keypoint heatmap.
classification_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
loss: A float scalar tensor representing the object keypoint heatmap loss
normalized by number of instances.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
assigner = self._target_assigner_dict[task_name]
(keypoint_heatmap, num_instances_per_kp_type,
valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
flattened_valid_mask = _flatten_spatial_dimensions(
tf.expand_dims(valid_mask_batch, axis=-1))
    flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)
    # Sum over the number of instances per keypoint type to get the total
    # number of keypoints. Note that this is used to normalize the loss and is
    # clamped to a minimum of 1 to avoid producing a degenerate loss value when
    # no keypoint is present in the image batch.
num_instances = tf.maximum(
tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),
1.0)
loss = 0.0
# Loop through each feature output head.
for pred in heatmap_predictions:
pred = _flatten_spatial_dimensions(pred)
unweighted_loss = classification_loss_fn(
pred,
          flattened_heatmap_targets,
weights=tf.ones_like(per_pixel_weights))
# Apply the weights after the loss function to have full control over it.
loss += unweighted_loss * per_pixel_weights * flattened_valid_mask
loss = tf.reduce_sum(loss) / (
float(len(heatmap_predictions)) * num_instances)
return loss
def _compute_kp_offset_loss(self, input_height, input_width, task_name,
offset_predictions, localization_loss_fn):
"""Computes the offset loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
      offset_predictions: A list of float tensors of shape [batch_size,
        out_height, out_width, 2] (or [batch_size, out_height, out_width,
        2 * num_keypoints] when per-keypoint offsets are enabled) representing
        the prediction heads of the model for keypoint offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint offset loss
normalized by number of total keypoints.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_offsets,
batch_weights) = assigner.assign_keypoints_offset_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list)
# Keypoint offset loss.
loss = 0.0
for prediction in offset_predictions:
batch_size, out_height, out_width, channels = _get_shape(prediction, 4)
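      # When per-keypoint offsets are enabled the head predicts two channels
      # per keypoint, so reshape to [..., num_keypoints, 2] before gathering
      # the predictions at the target indices.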
if channels > 2:
prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
prediction = cn_assigner.get_batch_predictions_from_indices(
prediction, batch_indices)
# The dimensions passed are not as per the doc string but the loss
# still computes the correct value.
unweighted_loss = localization_loss_fn(
prediction,
batch_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(offset_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_kp_regression_loss(self, input_height, input_width, task_name,
regression_predictions, localization_loss_fn):
"""Computes the keypoint regression loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
regression_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_keypoints] representing the prediction
heads of the model for keypoint regression offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint regression offset predictions in
CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint regression offset
loss normalized by number of total keypoints.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
# keypoint regression offset loss.
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_regression_offsets,
batch_weights) = assigner.assign_joint_regression_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list,
gt_boxes_list=gt_boxes_list)
loss = 0.0
for prediction in regression_predictions:
batch_size, out_height, out_width, _ = _get_shape(prediction, 4)
reshaped_prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
reg_prediction = cn_assigner.get_batch_predictions_from_indices(
reshaped_prediction, batch_indices)
unweighted_loss = localization_loss_fn(
reg_prediction,
batch_regression_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(regression_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):
"""Computes all the losses associated with segmentation.
Args:
prediction_dict: The dictionary returned from the predict() method.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary with segmentation losses.
"""
segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]
mask_loss = self._compute_mask_loss(
segmentation_heatmap, per_pixel_weights)
losses = {
SEGMENTATION_HEATMAP: mask_loss
}
return losses
def _compute_mask_loss(self, segmentation_predictions,
per_pixel_weights):
"""Computes the mask loss.
Args:
segmentation_predictions: A list of float32 tensors of shape [batch_size,
out_height, out_width, num_classes].
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the mask loss.
"""
gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[SEGMENTATION_TASK]
heatmap_targets = assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
loss = 0.0
mask_loss_fn = self._mask_params.classification_loss
total_pixels_in_loss = tf.reduce_sum(per_pixel_weights)
# Loop through each feature output head.
for pred in segmentation_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += mask_loss_fn(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
# TODO(ronnyvotel): Consider other ways to normalize loss.
total_loss = tf.reduce_sum(loss) / (
float(len(segmentation_predictions)) * total_pixels_in_loss)
return total_loss
def _compute_densepose_losses(self, input_height, input_width,
prediction_dict):
"""Computes the weighted DensePose losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by the
"predict" function. See the "predict" function for more detailed
description.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
the DensePose task:
DENSEPOSE_HEATMAP: the weighted part segmentation loss.
DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.
"""
dp_heatmap_loss, dp_regression_loss = (
self._compute_densepose_part_and_coordinate_losses(
input_height=input_height,
input_width=input_width,
part_predictions=prediction_dict[DENSEPOSE_HEATMAP],
surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION]))
loss_dict = {}
loss_dict[DENSEPOSE_HEATMAP] = (
self._densepose_params.part_loss_weight * dp_heatmap_loss)
loss_dict[DENSEPOSE_REGRESSION] = (
self._densepose_params.coordinate_loss_weight * dp_regression_loss)
return loss_dict
def _compute_densepose_part_and_coordinate_losses(
self, input_height, input_width, part_predictions,
surface_coord_predictions):
"""Computes the individual losses for the DensePose task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
part_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_parts].
surface_coord_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_parts].
Returns:
A tuple with two scalar loss tensors: part_prediction_loss and
surface_coord_loss.
"""
gt_dp_num_points_list = self.groundtruth_lists(
fields.BoxListFields.densepose_num_points)
gt_dp_part_ids_list = self.groundtruth_lists(
fields.BoxListFields.densepose_part_ids)
gt_dp_surface_coords_list = self.groundtruth_lists(
fields.BoxListFields.densepose_surface_coords)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[DENSEPOSE_TASK]
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
assigner.assign_part_and_coordinate_targets(
height=input_height,
width=input_width,
gt_dp_num_points_list=gt_dp_num_points_list,
gt_dp_part_ids_list=gt_dp_part_ids_list,
gt_dp_surface_coords_list=gt_dp_surface_coords_list,
gt_weights_list=gt_weights_list))
part_prediction_loss = 0
surface_coord_loss = 0
classification_loss_fn = self._densepose_params.classification_loss
localization_loss_fn = self._densepose_params.localization_loss
num_predictions = float(len(part_predictions))
num_valid_points = tf.math.count_nonzero(batch_weights)
num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)
for part_pred, surface_coord_pred in zip(part_predictions,
surface_coord_predictions):
# Potentially upsample the feature maps, so that better quality (i.e.
# higher res) groundtruth can be applied.
if self._densepose_params.upsample_to_input_res:
part_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
part_pred)
surface_coord_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
surface_coord_pred)
# Compute the part prediction loss.
part_pred = cn_assigner.get_batch_predictions_from_indices(
part_pred, batch_indices[:, 0:3])
part_prediction_loss += classification_loss_fn(
part_pred[:, tf.newaxis, :],
batch_part_ids[:, tf.newaxis, :],
weights=batch_weights[:, tf.newaxis, tf.newaxis])
# Compute the surface coordinate loss.
batch_size, out_height, out_width, _ = _get_shape(
surface_coord_pred, 4)
surface_coord_pred = tf.reshape(
surface_coord_pred, [batch_size, out_height, out_width, -1, 2])
surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(
surface_coord_pred, batch_indices)
surface_coord_loss += localization_loss_fn(
surface_coord_pred,
batch_surface_coords,
weights=batch_weights[:, tf.newaxis])
part_prediction_loss = tf.reduce_sum(part_prediction_loss) / (
num_predictions * num_valid_points)
surface_coord_loss = tf.reduce_sum(surface_coord_loss) / (
num_predictions * num_valid_points)
return part_prediction_loss, surface_coord_loss
def _compute_track_losses(self, input_height, input_width, prediction_dict):
"""Computes all the losses associated with tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with tracking losses.
"""
object_reid_predictions = prediction_dict[TRACK_REID]
embedding_loss = self._compute_track_embedding_loss(
input_height=input_height,
input_width=input_width,
object_reid_predictions=object_reid_predictions)
losses = {
TRACK_REID: embedding_loss
}
return losses
def _compute_track_embedding_loss(self, input_height, input_width,
object_reid_predictions):
"""Computes the object ReID loss.
The embedding is trained as a classification task where the target is the
ID of each track among all tracks in the whole dataset.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_reid_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, reid_embed_size] representing the object
embedding feature maps.
Returns:
A float scalar tensor representing the object ReID loss per instance.
"""
gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[TRACK_TASK]
batch_indices, batch_weights, track_targets = assigner.assign_track_targets(
height=input_height,
width=input_width,
gt_track_ids_list=gt_track_ids_list,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
loss = 0.0
object_reid_loss = self._track_params.classification_loss
# Loop through each feature output head.
for pred in object_reid_predictions:
embedding_pred = cn_assigner.get_batch_predictions_from_indices(
pred, batch_indices)
reid_classification = self.track_reid_classification_net(embedding_pred)
loss += object_reid_loss(
reid_classification, track_targets, weights=batch_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_reid_predictions)) * num_boxes)
return loss_per_instance
def _compute_temporal_offset_loss(self, input_height,
input_width, prediction_dict):
"""Computes the temporal offset loss for tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with track/temporal_offset losses.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_offsets_list = self.groundtruth_lists(
fields.BoxListFields.temporal_offsets)
gt_match_list = self.groundtruth_lists(
fields.BoxListFields.track_match_flags)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = tf.cast(
get_num_instances_from_weights(gt_weights_list), tf.float32)
offset_predictions = prediction_dict[TEMPORAL_OFFSET]
num_predictions = float(len(offset_predictions))
assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]
(batch_indices, batch_offset_targets,
batch_weights) = assigner.assign_temporal_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_offsets_list=gt_offsets_list,
gt_match_list=gt_match_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
offset_loss_fn = self._temporal_offset_params.localization_loss
loss_dict = {}
offset_loss = 0
for offset_pred in offset_predictions:
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += offset_loss_fn(offset_pred[:, None],
batch_offset_targets[:, None],
weights=batch_weights)
offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes)
loss_dict[TEMPORAL_OFFSET] = offset_loss
return loss_dict
def preprocess(self, inputs):
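    """Resizes and preprocesses a batch of images for the feature extractor.
    Args:
      inputs: A [batch, height_in, width_in, channels] float32 tensor
        representing a batch of images.
    Returns:
      A tuple of (preprocessed_inputs, true_image_shapes) where
      preprocessed_inputs is the resized batch passed through the feature
      extractor's preprocess function and true_image_shapes is an int32 tensor
      of shape [batch, 3] holding the shapes of the images before padding.
    """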
outputs = shape_utils.resize_images_and_return_shapes(
inputs, self._image_resizer_fn)
resized_inputs, true_image_shapes = outputs
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def predict(self, preprocessed_inputs, _):
"""Predicts CenterNet prediction tensors given an input batch.
Feature extractors are free to produce predictions from multiple feature
maps and therefore we return a dictionary mapping strings to lists.
E.g. the hourglass backbone produces two feature maps.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding predicted tensors with
'preprocessed_inputs' - The input image after being resized and
preprocessed by the feature extractor.
'object_center' - A list of size num_feature_outputs containing
float tensors of size [batch_size, output_height, output_width,
num_classes] representing the predicted object center heatmap logits.
'box/scale' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted box height and width at each output
location. This field exists only when object detection task is
specified.
'box/offset' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted y and x offsets at each output location.
'$TASK_NAME/keypoint_heatmap' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, num_keypoints] representing the predicted
keypoint heatmap logits.
'$TASK_NAME/keypoint_offset' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2] representing the predicted keypoint
offsets at each output location.
'$TASK_NAME/keypoint_regression' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2 * num_keypoints] representing the
predicted keypoint regression at each output location.
'segmentation/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_classes] representing the mask logits.
'densepose/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_parts] representing the mask logits for each part.
'densepose/regression' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, 2 * num_parts] representing the DensePose surface
coordinate predictions.
Note the $TASK_NAME is provided by the KeypointEstimation namedtuple
used to differentiate between different keypoint tasks.
"""
features_list = self._feature_extractor(preprocessed_inputs)
predictions = {}
for head_name, heads in self._prediction_head_dict.items():
predictions[head_name] = [
head(feature) for (feature, head) in zip(features_list, heads)
]
predictions['preprocessed_inputs'] = preprocessed_inputs
self._batched_prediction_tensor_names = predictions.keys()
return predictions
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Computes scalar loss tensors with respect to provided groundtruth.
This function implements the various CenterNet losses.
Args:
prediction_dict: a dictionary holding predicted tensors returned by
"predict" function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
scope: Optional scope name.
Returns:
A dictionary mapping the keys [
'Loss/object_center',
'Loss/box/scale', (optional)
'Loss/box/offset', (optional)
'Loss/$TASK_NAME/keypoint/heatmap', (optional)
'Loss/$TASK_NAME/keypoint/offset', (optional)
'Loss/$TASK_NAME/keypoint/regression', (optional)
'Loss/segmentation/heatmap', (optional)
'Loss/densepose/heatmap', (optional)
'Loss/densepose/regression', (optional)
        'Loss/track/reid', (optional)
'Loss/track/offset'] (optional)
scalar tensors corresponding to the losses for different tasks. Note the
$TASK_NAME is provided by the KeypointEstimation namedtuple used to
differentiate between different keypoint tasks.
"""
_, input_height, input_width, _ = _get_shape(
prediction_dict['preprocessed_inputs'], 4)
output_height, output_width = (input_height // self._stride,
input_width // self._stride)
# TODO(vighneshb) Explore whether using floor here is safe.
output_true_image_shapes = tf.ceil(
tf.to_float(true_image_shapes) / self._stride)
valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(
output_true_image_shapes, output_height, output_width)
valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)
object_center_loss = self._compute_object_center_loss(
object_center_predictions=prediction_dict[OBJECT_CENTER],
input_height=input_height,
input_width=input_width,
per_pixel_weights=valid_anchor_weights)
losses = {
OBJECT_CENTER:
self._center_params.object_center_loss_weight * object_center_loss
}
if self._od_params is not None:
od_losses = self._compute_object_detection_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in od_losses:
od_losses[key] = od_losses[key] * self._od_params.task_loss_weight
losses.update(od_losses)
if self._kp_params_dict is not None:
for task_name, params in self._kp_params_dict.items():
kp_losses = self._compute_keypoint_estimation_losses(
task_name=task_name,
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in kp_losses:
kp_losses[key] = kp_losses[key] * params.task_loss_weight
losses.update(kp_losses)
if self._mask_params is not None:
seg_losses = self._compute_segmentation_losses(
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in seg_losses:
seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight
losses.update(seg_losses)
if self._densepose_params is not None:
densepose_losses = self._compute_densepose_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in densepose_losses:
densepose_losses[key] = (
densepose_losses[key] * self._densepose_params.task_loss_weight)
losses.update(densepose_losses)
if self._track_params is not None:
track_losses = self._compute_track_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in track_losses:
track_losses[key] = (
track_losses[key] * self._track_params.task_loss_weight)
losses.update(track_losses)
if self._temporal_offset_params is not None:
offset_losses = self._compute_temporal_offset_loss(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in offset_losses:
offset_losses[key] = (
offset_losses[key] * self._temporal_offset_params.task_loss_weight)
losses.update(offset_losses)
# Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the
# losses will be grouped together in Tensorboard.
return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val)
for key, val in losses.items()])
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Produces boxes given a prediction dict returned by predict().
Although predict returns a list of tensors, only the last tensor in
each list is used for making box predictions.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_boxes - A tensor of shape [batch, max_detections, 4]
holding the predicted boxes.
detection_boxes_strided: A tensor of shape [batch_size, num_detections,
4] holding the predicted boxes in absolute coordinates of the
feature extractor's final layer output.
detection_scores: A tensor of shape [batch, max_detections] holding
the predicted score for each box.
detection_classes: An integer tensor of shape [batch, max_detections]
containing the detected class for each box.
num_detections: An integer tensor of shape [batch] containing the
number of detected boxes for each sample in the batch.
detection_keypoints: (Optional) A float tensor of shape [batch,
max_detections, num_keypoints, 2] with normalized keypoints. Any
invalid keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: (Optional) A float tensor of shape [batch,
max_detection, num_keypoints] with scores for each keypoint.
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
detection_surface_coords: (Optional) A float32 tensor of shape [batch,
max_detection, mask_height, mask_width, 2] with DensePose surface
coordinates, in (v, u) format.
detection_embeddings: (Optional) A float tensor of shape [batch,
max_detections, reid_embed_size] containing object embeddings.
"""
object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])
# Get x, y and channel indices corresponding to the top indices in the class
# center predictions.
detection_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(
object_center_prob, max_pool_kernel_size=3,
k=self._center_params.max_box_predictions))
boxes_strided, classes, scores, num_detections = (
prediction_tensors_to_boxes(
detection_scores, y_indices, x_indices, channel_indices,
prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1]))
boxes = convert_strided_predictions_to_normalized_boxes(
boxes_strided, self._stride, true_image_shapes)
postprocess_dict = {
fields.DetectionResultFields.detection_boxes: boxes,
fields.DetectionResultFields.detection_scores: scores,
fields.DetectionResultFields.detection_classes: classes,
fields.DetectionResultFields.num_detections: num_detections,
'detection_boxes_strided': boxes_strided
}
if self._kp_params_dict:
keypoints, keypoint_scores = self._postprocess_keypoints(
prediction_dict, classes, y_indices, x_indices,
boxes_strided, num_detections)
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints, keypoint_scores, self._stride, true_image_shapes,
clip_out_of_frame_keypoints=True))
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoints: keypoints,
fields.DetectionResultFields.detection_keypoint_scores:
keypoint_scores
})
if self._mask_params:
masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1])
densepose_part_heatmap, densepose_surface_coords = None, None
densepose_class_index = 0
if self._densepose_params:
densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1]
densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1]
densepose_class_index = self._densepose_params.class_id
instance_masks, surface_coords = (
convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap, densepose_surface_coords,
stride=self._stride, mask_height=self._mask_params.mask_height,
mask_width=self._mask_params.mask_width,
score_threshold=self._mask_params.score_threshold,
densepose_class_index=densepose_class_index))
postprocess_dict[
fields.DetectionResultFields.detection_masks] = instance_masks
if self._densepose_params:
postprocess_dict[
fields.DetectionResultFields.detection_surface_coords] = (
surface_coords)
if self._track_params:
embeddings = self._postprocess_embeddings(prediction_dict,
y_indices, x_indices)
postprocess_dict.update({
fields.DetectionResultFields.detection_embeddings: embeddings
})
if self._temporal_offset_params:
offsets = prediction_tensors_to_temporal_offsets(
y_indices, x_indices,
prediction_dict[TEMPORAL_OFFSET][-1])
postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
return postprocess_dict
def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
"""Performs postprocessing on embedding predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain embedding prediction
feature maps for tracking task.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
Returns:
embeddings: A [batch_size, max_detection, reid_embed_size] float32
tensor with L2 normalized embeddings extracted from detection box
centers.
"""
embedding_predictions = prediction_dict[TRACK_REID][-1]
embeddings = predicted_embeddings_at_object_centers(
embedding_predictions, y_indices, x_indices)
embeddings, _ = tf.linalg.normalize(embeddings, axis=-1)
return embeddings
def _postprocess_keypoints(self, prediction_dict, classes, y_indices,
x_indices, boxes, num_detections):
"""Performs postprocessing on keypoint predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
"""
total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict
in self._kp_params_dict.values())
batch_size, max_detections, _ = _get_shape(boxes, 3)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
for ex_ind in range(batch_size):
kpt_coords_for_class_list = []
kpt_scores_for_class_list = []
instance_inds_for_class_list = []
for task_name, kp_params in self._kp_params_dict.items():
keypoint_heatmap = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
keypoint_offsets = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
instance_inds = self._get_instance_indices(
classes, num_detections, ex_ind, kp_params.class_id)
num_ind = _get_shape(instance_inds, 1)
def true_fn(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds,
ex_ind, kp_params):
"""Logics to execute when instance_inds is not an empty set."""
# Postprocess keypoints and scores for class and single image. Shapes
# are [1, num_instances_i, num_keypoints_i, 2] and
# [1, num_instances_i, num_keypoints_i], respectively. Note that
          # num_instances_i and num_keypoints_i refer to the number of
# instances and keypoints for class i, respectively.
kpt_coords_for_class, kpt_scores_for_class = (
self._postprocess_keypoints_for_class_and_image(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds,
ex_ind, kp_params))
# Expand keypoint dimension (with padding) so that coordinates and
# scores have shape [1, num_instances_i, num_total_keypoints, 2] and
# [1, num_instances_i, num_total_keypoints], respectively.
kpts_coords_for_class_padded, kpt_scores_for_class_padded = (
_pad_to_full_keypoint_dim(
kpt_coords_for_class, kpt_scores_for_class,
kp_params.keypoint_indices, total_num_keypoints))
return kpts_coords_for_class_padded, kpt_scores_for_class_padded
def false_fn():
"""Logics to execute when the instance_inds is an empty set."""
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32),
tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
true_fn = functools.partial(
true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds, ex_ind,
kp_params)
# Use dimension values instead of tf.size for tf.lite compatibility.
results = tf.cond(num_ind[0] > 0, true_fn, false_fn)
kpt_coords_for_class_list.append(results[0])
kpt_scores_for_class_list.append(results[1])
instance_inds_for_class_list.append(instance_inds)
# Concatenate all keypoints across all classes (single example).
kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1)
kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1)
instance_inds_for_example = tf.concat(instance_inds_for_class_list,
axis=0)
# Use dimension values instead of tf.size for tf.lite compatibility.
num_inds = _get_shape(instance_inds_for_example, 1)
if num_inds[0] > 0:
# Scatter into tensor where instances align with original detection
# instances. New shape of keypoint coordinates and scores are
# [1, max_detections, num_total_keypoints, 2] and
# [1, max_detections, num_total_keypoints], respectively.
kpt_coords_for_example_all_det, kpt_scores_for_example_all_det = (
_pad_to_full_instance_dim(
kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example,
self._center_params.max_box_predictions))
else:
kpt_coords_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints, 2], dtype=tf.float32)
kpt_scores_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints], dtype=tf.float32)
kpt_coords_for_example_list.append(kpt_coords_for_example_all_det)
kpt_scores_for_example_list.append(kpt_scores_for_example_all_det)
# Concatenate all keypoints and scores from all examples in the batch.
# Shapes are [batch_size, max_detections, num_total_keypoints, 2] and
# [batch_size, max_detections, num_total_keypoints], respectively.
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
return keypoints, keypoint_scores
def _get_instance_indices(self, classes, num_detections, batch_index,
class_id):
"""Gets the instance indices that match the target class ID.
Args:
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
batch_index: An integer specifying the index for an example in the batch.
      class_id: An integer representing the target class id.
Returns:
instance_inds: A [num_instances] int32 tensor where each element indicates
the instance location within the `classes` tensor. This is useful to
associate the refined keypoints with the original detections (i.e.
boxes)
"""
classes = classes[batch_index:batch_index+1, ...]
_, max_detections = shape_utils.combined_static_and_dynamic_shape(
classes)
# Get the detection indices corresponding to the target class.
# Call tf.math.equal with matched tensor shape to make it tf.lite
# compatible.
valid_detections_with_kpt_class = tf.math.logical_and(
tf.range(max_detections) < num_detections[batch_index],
tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id)))
instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
# Cast the indices tensor to int32 for tf.lite compatibility.
return tf.cast(instance_inds, tf.int32)
def _postprocess_keypoints_for_class_and_image(
self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes,
y_indices, x_indices, boxes, indices_with_kpt_class, batch_index,
kp_params):
"""Postprocess keypoints for a single image and class.
This function performs the following postprocessing operations on a single
image and single keypoint class:
- Converts keypoints scores to range [0, 1] with sigmoid.
- Determines the detections that correspond to the specified keypoint class.
- Gathers the regressed keypoints at the detection (i.e. box) centers.
- Gathers keypoint candidates from the keypoint heatmaps.
- Snaps regressed keypoints to nearby keypoint candidates.
Args:
keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32
tensor with keypoint heatmaps.
keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with
local offsets to keypoint centers.
keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]
float32 tensor with regressed offsets to all keypoints.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with detected
boxes in the output (strided) frame.
indices_with_kpt_class: A [num_instances] int tensor where each element
indicates the instance location within the `classes` tensor. This is
useful to associate the refined keypoints with the original detections
(i.e. boxes)
batch_index: An integer specifying the index for an example in the batch.
kp_params: A `KeypointEstimationParams` object with parameters for a
single keypoint class.
Returns:
A tuple of
refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor
with refined keypoints for a single class in a single image, expressed
in the output (strided) coordinate frame. Note that `num_instances` is a
dynamic dimension, and corresponds to the number of valid detections
for the specific class.
refined_scores: A [1, num_instances, num_keypoints] float32 tensor with
keypoint scores.
"""
keypoint_indices = kp_params.keypoint_indices
num_keypoints = len(keypoint_indices)
keypoint_heatmap = tf.nn.sigmoid(
keypoint_heatmap[batch_index:batch_index+1, ...])
keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...]
keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...]
y_indices = y_indices[batch_index:batch_index+1, ...]
x_indices = x_indices[batch_index:batch_index+1, ...]
# Gather the feature map locations corresponding to the object class.
y_indices_for_kpt_class = tf.gather(y_indices, indices_with_kpt_class,
axis=1)
x_indices_for_kpt_class = tf.gather(x_indices, indices_with_kpt_class,
axis=1)
boxes_for_kpt_class = tf.gather(boxes, indices_with_kpt_class, axis=1)
# Gather the regressed keypoints. Final tensor has shape
# [1, num_instances, num_keypoints, 2].
regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(
keypoint_regression, y_indices_for_kpt_class, x_indices_for_kpt_class)
regressed_keypoints_for_objects = tf.reshape(
regressed_keypoints_for_objects, [1, -1, num_keypoints, 2])
# Get the candidate keypoints and scores.
# The shape of keypoint_candidates and keypoint_scores is:
# [1, num_candidates_per_keypoint, num_keypoints, 2] and
# [1, num_candidates_per_keypoint, num_keypoints], respectively.
keypoint_candidates, keypoint_scores, num_keypoint_candidates = (
prediction_tensors_to_keypoint_candidates(
keypoint_heatmap, keypoint_offsets,
keypoint_score_threshold=(
kp_params.keypoint_candidate_score_threshold),
max_pool_kernel_size=kp_params.peak_max_pool_kernel_size,
max_candidates=kp_params.num_candidates_per_keypoint))
# Get the refined keypoints and scores, of shape
# [1, num_instances, num_keypoints, 2] and
# [1, num_instances, num_keypoints], respectively.
refined_keypoints, refined_scores = refine_keypoints(
regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores,
num_keypoint_candidates, bboxes=boxes_for_kpt_class,
unmatched_keypoint_score=kp_params.unmatched_keypoint_score,
box_scale=kp_params.box_scale,
candidate_search_scale=kp_params.candidate_search_scale,
candidate_ranking_mode=kp_params.candidate_ranking_mode)
return refined_keypoints, refined_scores
def regularization_losses(self):
return []
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
raise RuntimeError('CenterNetMetaArch not supported under TF1.x.')
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (not implemented
in CenterNet) is intended to be used to restore Slim-based models when
running Tensorflow 1.x.
TODO(jonathanhuang): Make this function consistent with other
meta-architectures.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`, `fine_tune`.
Default 'detection'.
'detection': used when loading models pre-trained on other detection
tasks. With this checkpoint type the weights of the feature extractor
are expected under the attribute 'feature_extractor'.
'classification': used when loading models pre-trained on an image
classification task. Note that only the encoder section of the network
is loaded and not the upsampling layers. With this checkpoint type,
the weights of only the encoder section are expected under the
attribute 'feature_extractor'.
'fine_tune': used when loading the entire CenterNet feature extractor
pre-trained on other tasks. The checkpoints saved during CenterNet
model training can be directly loaded using this type. With this
checkpoint type, the weights of the feature extractor are expected
under the attribute 'model._feature_extractor'.
For more details, see the tensorflow section on Loading mechanics.
https://www.tensorflow.org/guide/checkpoint#loading_mechanics
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
supported_types = self._feature_extractor.supported_sub_model_types
supported_types += ['fine_tune']
if fine_tune_checkpoint_type not in supported_types:
message = ('Checkpoint type "{}" not supported for {}. '
'Supported types are {}')
raise ValueError(
message.format(fine_tune_checkpoint_type,
self._feature_extractor.__class__.__name__,
supported_types))
elif fine_tune_checkpoint_type == 'fine_tune':
feature_extractor_model = tf.train.Checkpoint(
_feature_extractor=self._feature_extractor)
return {'model': feature_extractor_model}
else:
return {'feature_extractor': self._feature_extractor.get_sub_model(
fine_tune_checkpoint_type)}
def updates(self):
raise RuntimeError('This model is intended to be used with model_lib_v2 '
'which does not support updates()')
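# A minimal usage sketch (illustrative only, not part of the original module; the
# `model` argument is assumed to be a fully constructed CenterNetMetaArch whose
# groundtruth has already been provided, and `images` a [batch, H, W, 3] float32
# tensor). It shows the intended preprocess -> predict -> loss / postprocess flow
# documented in the methods above.
def _example_centernet_step(model, images, training=True):
  # Resize and normalize the raw images for the feature extractor.
  preprocessed_inputs, true_image_shapes = model.preprocess(images)
  # Run the backbone and all prediction heads; each entry maps a head name to a
  # list with one tensor per backbone feature output.
  prediction_dict = model.predict(preprocessed_inputs, None)
  if training:
    # Scalar losses keyed as 'Loss/<task name>'.
    return model.loss(prediction_dict, true_image_shapes)
  # Convert the final heatmaps/offsets into boxes, scores and classes.
  return model.postprocess(prediction_dict, true_image_shapes)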
|
py | 1a3653cb13701905e291a306a6251f283b564c58 | def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Gender, obj[4]: Age, obj[5]: Children, obj[6]: Education, obj[7]: Occupation, obj[8]: Income, obj[9]: Bar, obj[10]: Coffeehouse, obj[11]: Restaurant20to50, obj[12]: Direction_same, obj[13]: Distance
# {"feature": "Occupation", "instances": 34, "metric_value": 0.9774, "depth": 1}
if obj[7]<=7:
# {"feature": "Distance", "instances": 21, "metric_value": 0.7919, "depth": 2}
if obj[13]>1:
# {"feature": "Restaurant20to50", "instances": 13, "metric_value": 0.3912, "depth": 3}
if obj[11]<=2.0:
return 'True'
elif obj[11]>2.0:
return 'False'
else: return 'False'
elif obj[13]<=1:
# {"feature": "Income", "instances": 8, "metric_value": 1.0, "depth": 3}
if obj[8]<=7:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.7219, "depth": 4}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
# {"feature": "Age", "instances": 2, "metric_value": 1.0, "depth": 5}
if obj[4]>1:
return 'True'
elif obj[4]<=1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[8]>7:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]>7:
# {"feature": "Education", "instances": 13, "metric_value": 0.8905, "depth": 2}
if obj[6]<=3:
# {"feature": "Time", "instances": 11, "metric_value": 0.684, "depth": 3}
if obj[1]>0:
return 'False'
elif obj[1]<=0:
# {"feature": "Passanger", "instances": 4, "metric_value": 1.0, "depth": 4}
if obj[0]<=2:
# {"feature": "Coffeehouse", "instances": 3, "metric_value": 0.9183, "depth": 5}
if obj[10]>1.0:
return 'False'
elif obj[10]<=1.0:
return 'True'
else: return 'True'
elif obj[0]>2:
return 'True'
else: return 'True'
else: return 'False'
elif obj[6]>3:
return 'True'
else: return 'True'
else: return 'False'
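# Illustrative call (comment-only; the feature values below are made up and
# follow the column order documented in the comment on findDecision):
#   findDecision([1, 0, 3, 1, 2, 0, 2, 5, 6, 1.0, 2.0, 1.0, 0, 1])  # -> 'True' or 'False'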
|
py | 1a3654d0261275183161b329e18e3b0edc95e1b3 | import unittest
import numpy
from cupy import testing
@testing.parameterize(
{'array': numpy.arange(6).reshape([2, 3]), 'pad_width': 1,
'mode': 'constant'},
{'array': numpy.arange(6).reshape([2, 3]),
'pad_width': [1, 2], 'mode': 'constant'},
{'array': numpy.arange(6).reshape([2, 3]),
'pad_width': [[1, 2], [3, 4]], 'mode': 'constant'},
)
@testing.gpu
class TestPadDefault(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_pad_default(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
a = xp.pad(array, self.pad_width, mode=self.mode)
return a
@testing.parameterize(
{'array': numpy.arange(6).reshape([2, 3]), 'pad_width': 1,
'mode': 'constant', 'constant_values': 3},
{'array': numpy.arange(6).reshape([2, 3]),
'pad_width': [1, 2], 'mode': 'constant',
'constant_values': [3, 4]},
{'array': numpy.arange(6).reshape([2, 3]),
'pad_width': [[1, 2], [3, 4]], 'mode': 'constant',
'constant_values': [[3, 4], [5, 6]]},
)
@testing.gpu
# Old numpy does not work with multi-dimensional constant_values
@testing.with_requires('numpy>=1.11.1')
class TestPad(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_pad(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
a = xp.pad(array, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return a
@testing.gpu
class TestPadNumpybug(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.with_requires('numpy>=1.11.2')
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_pad_highdim_default(self, xp, dtype):
array = xp.arange(6, dtype=dtype).reshape([2, 3])
pad_width = [[1, 2], [3, 4]]
constant_values = [[1, 2], [3, 4]]
a = xp.pad(array, pad_width, mode='constant',
constant_values=constant_values)
return a
@testing.parameterize(
{'array': [], 'pad_width': 1, 'mode': 'constant', 'constant_values': 3},
{'array': 1, 'pad_width': 1, 'mode': 'constant', 'constant_values': 3},
{'array': [0, 1, 2, 3], 'pad_width': 1, 'mode': 'constant',
'constant_values': 3},
{'array': [0, 1, 2, 3], 'pad_width': [1, 2], 'mode': 'constant',
'constant_values': 3},
)
@testing.gpu
class TestPadSpecial(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.numpy_cupy_array_equal()
def test_pad_special(self, xp):
a = xp.pad(self.array, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return a
@testing.parameterize(
{'array': [0, 1, 2, 3], 'pad_width': [-1, 1], 'mode': 'constant',
'constant_values': 3},
{'array': [0, 1, 2, 3], 'pad_width': [], 'mode': 'constant',
'constant_values': 3},
{'array': [0, 1, 2, 3], 'pad_width': [[3, 4], [5, 6]], 'mode': 'constant',
'constant_values': 3},
{'array': [0, 1, 2, 3], 'pad_width': [1], 'mode': 'constant',
'notallowedkeyword': 3},
)
@testing.gpu
@testing.with_requires('numpy>=1.11.1') # Old numpy fails differently
class TestPadFailure(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.numpy_cupy_raises()
def test_pad_failure(self, xp):
a = xp.pad(self.array, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return a
|
py | 1a365545beae3a4e90be662dda168746a6a88f75 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from . import AbstractCostFunction
from .gap_close import AbstractGapCloseCostFunction
from ...trajectories import Trajectories
__all__ = ["BrownianLinkCostFunction", "BrownianGapCloseCostFunction"]
class BrownianLinkCostFunction(AbstractCostFunction):
"""This class generates cost matrices for brownian motion
trajectories.
    The cost between two positions is given by the square of their
distance
Attributes
----------
parameters: dict
Used by the `build` method, with the following keys:
- 'distance_metric': a string, default 'euclidean',
passed to `scipy.spatial.distance.cdist`
(see this function documentation for more)
- 'coords': a list of column names on which to compute the distance,
default ['x', 'y', 'z']
- 'max_speed': a float, default 1. All the values of the cost matrix
for which the distance *divided by the time difference* is higher than
this parameter's value are set to np.nan
context: dict
Context is used to store vectors.
- pos_in: :class:`pandas.DataFrame`
The object coordinates to link from
- pos_out: :class:`pandas.DataFrame`
The object coordinates to link to
"""
def __init__(self, parameters):
"""
"""
_parameters = {'distance_metric': 'euclidean',
'max_speed': 1.,
'coords': ['x', 'y', 'z']}
_parameters.update(parameters)
super(BrownianLinkCostFunction, self).__init__(context={}, parameters=_parameters)
def _build(self):
"""
"""
# Get parameters
coords = self.parameters['coords']
distance_metric = self.parameters['distance_metric']
max_speed = self.parameters['max_speed']
# Check context
pos_in = self.check_context('pos_in', pd.DataFrame)
pos_out = self.check_context('pos_out', pd.DataFrame)
        # Check vectors
self.check_columns([pos_in, pos_out], list(coords) + ['t'])
if pos_out.empty or pos_in.empty:
return pd.DataFrame([])
dt = pos_out['t'].iloc[0] - pos_in['t'].iloc[0]
# Build matrix block
distances = cdist(pos_in[coords].astype(np.float),
pos_out[coords].astype(np.float),
metric=distance_metric)
distances /= np.abs(dt)
distances[distances > max_speed] = np.nan
distances = distances ** 2
return distances
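# Illustrative sketch (comment-only; the public entry point is defined by
# AbstractCostFunction, which is not shown here, so calling _build() directly
# is an assumption):
#   cost_func = BrownianLinkCostFunction({'max_speed': 5.})
#   cost_func.context['pos_in'] = pos_in    # pandas.DataFrame with x, y, z, t
#   cost_func.context['pos_out'] = pos_out  # pandas.DataFrame with x, y, z, t
#   block = cost_func._build()              # squared distances, np.nan above max_speed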
class BrownianGapCloseCostFunction(AbstractGapCloseCostFunction):
"""
"""
def __init__(self, parameters):
"""
"""
_parameters = {'distance_metric': 'euclidean',
'max_speed': 1.,
'coords': ['x', 'y', 'z']}
_parameters.update(parameters)
super(self.__class__, self).__init__(context={}, parameters=_parameters)
def _build(self,):
"""
"""
self.check_idxs_length()
# Get parameters
coords = self.parameters['coords']
distance_metric = self.parameters['distance_metric']
if distance_metric != 'euclidean':
raise Exception("Only 'euclidean' distance are supported for now.")
max_speed = self.parameters['max_speed']
# Check context
idxs_in = self.check_context('idxs_in', list)
idxs_out = self.check_context('idxs_out', list)
trajs = self.check_context('trajs', Trajectories)
# Just in case the parent didn't do it
trajs.relabel_fromzero('label', inplace=True)
# Init 2d distances array
mat = np.empty((len(trajs.labels),
len(trajs.labels)))
mat.fill(np.nan)
# Compute distance between all_pos_out and all_pos_in
all_pos_in = trajs.loc[idxs_in]
all_pos_out = trajs.loc[idxs_out]
vecs = [(all_pos_in[c].values - all_pos_out[c].values) ** 2 for c in coords]
all_dist = np.sqrt(np.sum(vecs, axis=0))
# Get all dt
all_dt = np.abs(all_pos_in['t'].values - all_pos_out['t'].values)
# Compute speeds
speeds = all_dist / all_dt
# Remove speeds greater than 'max_speed'
speeds[speeds > max_speed] = np.nan
# Fill 2d distances array
i_in = np.array(idxs_in)[:, 1].astype(int)
i_out = np.array(idxs_out)[:, 1].astype(int)
mat[i_in, i_out] = speeds
mat = mat ** 2
return mat
|
py | 1a365560fae99eaad663dc171675a4eb43665b2a | from germanium.impl import _filter_one_for_action
def _element(germanium, selector):
"""
Finds a single element for doing a visual action.
:param germanium:
:param selector:
:return:
"""
element = None
if selector:
items = germanium.S(selector).element_list(only_visible=False)
element = _filter_one_for_action(germanium, items)
return element
|
py | 1a365570a564b0151a56c1f092d9168401219d22 | #Matt Morrow spcID2412353 COURSE: COP1000
#Statement 1: create random numbers between 1 and 25
#Statement 2: sort the numbers
#Statement 3: display in values
#Statement 4: display values in order
#Statement 5: Determine odds/evens and display
import random
def main():
nums = []
for value in range(10):
nums.append(random.randint(1,25))
numbers = sorted(nums)
for x in range(1):
print(*nums)
print(*numbers)
start = numbers[:4]
print(start)
finish = numbers[-5:]
print(finish)
odd_even(numbers)
def odd_even(numbers):
even_count = 0
odd_count = 0
for val in numbers:
if val % 2 == 0:
even_count += 1
if val % 2 != 0:
odd_count += 1
print("List had" + ' ' + str(even_count) + ' ' + "evens and" + ' ' + str(odd_count) + ' ' + "odds")
print("The 6th element in sorted nums is" + ' ' + str(numbers[5]))
main()
|
py | 1a3655d455abd3342afd353772ee97ea8e4cebe9 | import glymur
import os
import numpy as np
import tempfile
class jpeg(object):
@staticmethod
def name():
        '''JPEG2000 encoding
'''
return 'JPEG2000'
@staticmethod
def compress(data, *args, **kwargs):
'''JPEG2000 compression
'''
TMPFOLDER = tempfile.mkdtemp()
compressed_data = ''
sizes = []
for iz in range(0, data.shape[0]):
img = data[iz, :, :]
colorized = np.zeros(
(3, img.shape[0], img.shape[1]), dtype=np.uint16
)
# for every value split into three 16 bit samples
colorized[0, :, :] = img % (2**16)
img = img >> 16
colorized[1, :, :] = img % (2**16)
            img = img >> 16  # shift another 16 bits so channel 2 gets bits 32-47
colorized[2, :, :] = img % (2**16)
#print colorized.shape
glymur.Jp2k(TMPFOLDER+'/tmp_' + str(iz) + '.jp2', colorized)
#glymur.Jp2k('JPEG_TMP/tmp_' + str(iz) + '.jp2', img.astype(np.uint16))
with open(TMPFOLDER+'/tmp_' + str(iz) + '.jp2', 'rb') as fd:
c_data = fd.read()
compressed_data += c_data
sizes.append(len(c_data))
frames = np.zeros((len(sizes)), dtype=np.uint64)
for i,s in enumerate(sizes):
frames[i] = s
#
#
# no of frames
output = np.uint64(len(sizes)).tobytes()
# frame sizes
output += frames.tobytes()
output += compressed_data
# print sizes
return output
@staticmethod
def decompress(data, *args, **kwargs):
'''JPEG2000 decompression
'''
TMPFOLDER = tempfile.mkdtemp()
# grab no of frames
no_frames = np.fromstring(data[0:8], dtype=np.uint64)
# print no_frames, len(data), data[8:8*no_frames]
no_frames = no_frames[0]
frame_sizes = data[8:8+int(8*no_frames)]
# print no_frames, frame_sizes
# grab frame sizes
sizes = np.fromstring(frame_sizes, dtype=np.uint64)
# store each frame to TMP FOLDER
data_start_byte = 8 + 8*no_frames
current_byte_pointer = data_start_byte
for i in range(sizes.shape[0]):
# print 'writing',i,current_byte_pointer,current_byte_pointer+sizes[i]
current_bytes = data[int(current_byte_pointer):int(current_byte_pointer+sizes[i])]
with open(TMPFOLDER+'/tmp_'+str(i)+'.jp2', 'wb') as f:
f.write(current_bytes)
current_byte_pointer = current_byte_pointer+sizes[i]
nfiles = len(os.listdir(TMPFOLDER))
for ie, filename in enumerate(os.listdir(TMPFOLDER)):
input_filename = TMPFOLDER + '/' + filename
colorized = glymur.Jp2k(input_filename)
index = int(filename.split('_')[1].split('.')[0])
if (ie == 0):
decompressed_data = np.zeros(
(nfiles, colorized.shape[1], colorized.shape[2]),
dtype=np.uint64
)
decompressed_data[index, :, :] = (
colorized[0, :, :] +
colorized[1, :, :] * (2 ** 16) +
                colorized[2, :, :] * (2 ** 32)
)
return decompressed_data
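# Round-trip sketch (comment-only; assumes the Python 2-era str/bytes handling
# used throughout this module and input values below 2**48):
#   volume = np.random.randint(0, 2 ** 16, size=(4, 64, 64)).astype(np.uint64)
#   blob = jpeg.compress(volume)
#   restored = jpeg.decompress(blob)
#   assert np.array_equal(volume, restored)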
|
py | 1a3658570653ad132ed5336bfcbb238f8a99376f | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad
import math
from itertools import chain
from ._deeplab import ASPP
# -------------------------------------------
# Multi-Knowledge Aggregation
# -------------------------------------------
class Reshape(nn.Module):
def __init__(self, *args):
super(Reshape, self).__init__()
self.shape = args
def forward(self, x):
return x.view((x.size(0),)+self.shape)
class DeepLab_AUX(nn.Module):
def __init__(self, agg_ch, in_channels, num_classes=11, aspp_dilate=[6, 12, 18]):
super(DeepLab_AUX, self).__init__()
# self.agg = nn.Sequential(
# nn.Conv2d(agg_ch, in_channels, 1),
# nn.BatchNorm2d(in_channels),
# nn.ReLU()
# )
self.aspp = ASPP(in_channels, aspp_dilate)
self.head = nn.Sequential(
nn.Conv2d(256, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, num_classes, 1)
)
def forward(self, x, input_size):
# ka = self.agg(x)
out = self.aspp(x)
out = self.head(out)
out = F.interpolate(out, size=input_size, mode='bilinear', align_corners=False)
return out
class MKAT_F(nn.Module):
def __init__(self, s_shape, t_shape, nz=256, kn_list=range(5)):
super(MKAT_F, self).__init__()
self.nz = nz
self.kn_list = kn_list
self.num_k = len(kn_list)
agg_ch = self.num_k * nz
in_channels = 512
def conv1x1(in_channels, out_channels, stride=1):
return nn.Conv2d(
in_channels, out_channels,
kernel_size=1, padding=0,
bias=False, stride=stride)
at_shape = (s_shape[0], 1, s_shape[2] * s_shape[3])
jac_shape = (s_shape[0], 3, 768, 768)
af_shape = s_shape
sa_shape = s_shape
ca_shape = s_shape
cm_shape = s_shape
gm_shape = s_shape
self.at_enc_s = nn.Sequential(
conv1x1(at_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.af_enc_s = nn.Sequential(
conv1x1(af_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.sa_enc_s = nn.Sequential(
conv1x1(sa_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.ca_enc_s = nn.Sequential(
conv1x1(ca_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.cm_enc_s = nn.Sequential(
conv1x1(cm_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.gm_enc_s = nn.Sequential(
conv1x1(gm_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.jac_enc_s = nn.Sequential(
nn.Conv2d(jac_shape[1], nz//8, 5, 1),
nn.BatchNorm2d(nz//8),
nn.ReLU6(inplace=True),
nn.Conv2d(nz//8, nz//4, 5, 3, 1),
nn.BatchNorm2d(nz//4),
nn.ReLU6(inplace=True),
conv1x1(nz//4, nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
at_shape = (t_shape[0], 1, t_shape[2] * t_shape[3])
jac_shape = (t_shape[0], 3, 768, 768)
af_shape = t_shape
sa_shape = t_shape
ca_shape = t_shape
cm_shape = t_shape
gm_shape = t_shape
self.at_enc_t = nn.Sequential(
conv1x1(at_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.af_enc_t = nn.Sequential(
conv1x1(af_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.sa_enc_t = nn.Sequential(
conv1x1(sa_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.ca_enc_t = nn.Sequential(
conv1x1(ca_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.cm_enc_t = nn.Sequential(
conv1x1(cm_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.gm_enc_t = nn.Sequential(
conv1x1(gm_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.agg_s = nn.Sequential(
nn.Conv2d(agg_ch, in_channels, 1),
nn.BatchNorm2d(in_channels),
nn.ReLU()
)
self.agg_t = nn.Sequential(
nn.Conv2d(agg_ch, in_channels, 1),
nn.BatchNorm2d(in_channels),
nn.ReLU()
)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
@staticmethod
def adapt_wh(f_s, f_t):
s_H, t_H = f_s.shape[2], f_t.shape[2]
if s_H > t_H:
f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H))
elif s_H < t_H:
f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H))
else:
pass
return f_s, f_t
def forward(self, f_s, f_t):
f_s, f_t = self.adapt_wh(f_s, f_t)
at_s, at_t = self.at(f_s), self.at(f_t)
af_s, af_t = self.af(f_s), self.af(f_t)
cm_s, cm_t = self.cm(f_s), self.cm(f_t)
sa_s, sa_t = self.sa(f_s), self.sa(f_t)
ca_s, ca_t = self.ca(f_s), self.ca(f_t)
gm_s, gm_t = self.gram(f_s), self.gram(f_t)
at_em_s, at_em_t = self.at_enc_s(at_s), self.at_enc_t(at_t)
af_em_s, af_em_t = self.af_enc_s(af_s), self.af_enc_t(af_t)
cm_em_s, cm_em_t = self.cm_enc_s(cm_s), self.cm_enc_t(cm_t)
sa_em_s, sa_em_t = self.sa_enc_s(sa_s), self.sa_enc_t(sa_t)
ca_em_s, ca_em_t = self.ca_enc_s(ca_s), self.ca_enc_t(ca_t)
gm_em_s, gm_em_t = self.gm_enc_s(gm_s), self.gm_enc_t(gm_t)
stack_s = [at_em_s, af_em_s, cm_em_s, sa_em_s, ca_em_s, gm_em_s]
stack_t = [at_em_t, af_em_t, cm_em_t, sa_em_t, ca_em_t, gm_em_t]
feat_stack_s = torch.cat([stack_s[i] for i in self.kn_list], dim=1) #
feat_stack_t = torch.cat([stack_t[i] for i in self.kn_list], dim=1) #
feat_s = self.agg_s(feat_stack_s)
feat_t = self.agg_t(feat_stack_t)
return feat_stack_s, feat_stack_t, feat_s, feat_t
''' get params '''
def enc_s_params(self):
return chain(self.at_enc_s.parameters(), self.af_enc_s.parameters(), self.ca_enc_s.parameters(),
self.sa_enc_s.parameters(), self.cm_enc_s.parameters(), self.gm_enc_s.parameters(), self.agg_s.parameters())
def enc_t_params(self):
return chain(self.at_enc_t.parameters(), self.af_enc_t.parameters(), self.ca_enc_t.parameters(),
self.sa_enc_t.parameters(), self.cm_enc_t.parameters(), self.gm_enc_t.parameters(), self.agg_t.parameters())
''' ---- 6/7 forms of knowledge ---- '''
@staticmethod
# attention
def at(f, p=2):
return F.normalize(f.pow(p).mean(1).view(f.size(0), -1)).reshape((f.size(0), 1, f.size(2), f.size(3)))
@staticmethod
# correlation matrix -- dual affinity
def cm(f, P_order=2, gamma=0.4):
f = F.normalize(f, p=2, dim=-1)
f_trans = torch.transpose(f, 2, 3)
sim_mat = torch.matmul(f_trans, torch.matmul(f, f_trans)) # (H*W)x[(W*H)x(H*W)] = (H*W)
corr_mat1 = torch.zeros_like(sim_mat)
for p in range(P_order+1):
corr_mat1 += math.exp(-2*gamma) * (2*gamma)**p / \
math.factorial(p) * torch.pow(sim_mat, p)
corr_mat1 = torch.transpose(corr_mat1, 2, 3)
sim_mat2 = torch.matmul(f, torch.matmul(f_trans, f)) # (W*H)x[(H*W)x(W*H)] = (W*H)
corr_mat2 = torch.zeros_like(sim_mat2)
for p in range(P_order+1):
corr_mat2 += math.exp(-2*gamma) * (2*gamma)**p / \
math.factorial(p) * torch.pow(sim_mat2, p)
corr_mat = corr_mat1 + corr_mat2
return corr_mat
@staticmethod
# grad cam
def cam(out, f, target):
target_out = torch.gather(out, 2, torch.unsqueeze(target, 1))
grad_fm = grad(outputs=target_out, inputs=f,
grad_outputs=torch.ones_like(target_out),
create_graph=True, retain_graph=True, only_inputs=True)[0]
weights = F.adaptive_avg_pool2d(grad_fm, 1)
cam = torch.sum(torch.mul(weights, grad_fm), dim=1, keepdim=True)
cam = F.relu(cam)
cam = cam.view(cam.size(0), -1)
norm_cam = F.normalize(cam, p=2, dim=1)
return norm_cam
@staticmethod
# grad norm
def jacobian_grad(out, img, target):
target_out = torch.gather(out, 2, torch.unsqueeze(target, 1))
grad_ = grad(outputs=target_out, inputs=img,
grad_outputs=torch.ones_like(target_out),
create_graph=True, retain_graph=True, only_inputs=True)[0]
norm_grad = F.normalize(grad_.view(grad_.size(0), -1), p=2, dim=1)
return norm_grad
@staticmethod
# attention feature norm
def af(f, eps=1e-6):
fm_norm = torch.norm(f, dim=(2,3), keepdim=True)
af = torch.div(f, fm_norm + eps)
return af
@staticmethod
# spatial attention
def sa(f, gamma=0.4):
m_batchsize, C, height, width = f.size()
proj_query = f.view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = f.view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = F.softmax(energy, dim=-1)
proj_value = f.view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
# out = gamma*out + f
return out
@staticmethod
# channel attention
def ca(f, gamma=0.4):
m_batchsize, C, height, width = f.size()
proj_query = f.view(m_batchsize, C, -1)
proj_key = f.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = F.softmax(energy_new, dim=-1)
proj_value = f.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
# out = gamma*out + f
return out
# gram matrix
@staticmethod
def gram(f):
shape = f.shape
f = f.view(f.size(0), f.size(1), -1)
fm = F.normalize(f, dim=2)
gram_matrix = torch.bmm(fm, fm.transpose(1, 2))
trans_gram = torch.bmm(gram_matrix, f)
trans_gram = trans_gram.view(shape)
return trans_gram
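# Illustrative instantiation (comment-only; the NCHW shapes below are
# assumptions about the student/teacher feature maps being aggregated):
#   mkat = MKAT_F(s_shape=(8, 512, 24, 24), t_shape=(8, 2048, 24, 24), nz=256)
#   stack_s, stack_t, feat_s, feat_t = mkat(f_student, f_teacher)
#   # feat_s / feat_t could then feed a DeepLab_AUX head (assumed intended use).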
|
py | 1a3658ceaeff5f8c43727a76e15f74e1e77157ac | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Base',
'version': '1.3',
'category': 'Hidden',
'description': """
The kernel of Odoo, needed for all installation.
===================================================
""",
'depends': [],
'data': [
'data/res.lang.csv',
'data/res_lang_data.xml',
'data/res_partner_data.xml',
'data/res_company_data.xml',
'data/res_users_data.xml',
'data/report_paperformat_data.xml',
'data/res_currency_data.xml',
'data/res_country_data.xml',
'data/ir_demo_data.xml',
'security/base_groups.xml',
'security/base_security.xml',
'views/base_menus.xml',
'views/decimal_precision_views.xml',
'views/res_config_views.xml',
'data/res.country.state.csv',
'views/ir_actions_views.xml',
'views/ir_config_parameter_views.xml',
'views/ir_cron_views.xml',
'views/ir_filters_views.xml',
'views/ir_mail_server_views.xml',
'views/ir_model_views.xml',
'views/ir_attachment_views.xml',
'views/ir_rule_views.xml',
'views/ir_sequence_views.xml',
'views/ir_translation_views.xml',
'views/ir_ui_menu_views.xml',
'views/ir_ui_view_views.xml',
'views/ir_default_views.xml',
'data/ir_cron_data.xml',
'report/ir_model_report.xml',
'report/ir_model_templates.xml',
'views/ir_logging_views.xml',
'views/ir_qweb_widget_templates.xml',
'views/ir_module_views.xml',
'data/ir_module_category_data.xml',
'data/ir_module_module.xml',
'report/ir_module_reports.xml',
'report/ir_module_report_templates.xml',
'wizard/base_module_update_views.xml',
'wizard/base_language_install_views.xml',
'wizard/base_import_language_views.xml',
'wizard/base_module_upgrade_views.xml',
'wizard/base_module_uninstall_views.xml',
'wizard/base_export_language_views.xml',
'wizard/base_update_translations_views.xml',
'wizard/base_partner_merge_views.xml',
'data/ir_actions_data.xml',
'data/ir_demo_failure_data.xml',
'views/res_company_views.xml',
'views/res_lang_views.xml',
'views/res_partner_views.xml',
'views/res_bank_views.xml',
'views/res_country_views.xml',
'views/res_currency_views.xml',
'views/res_users_views.xml',
'views/ir_property_views.xml',
'views/res_config_settings_views.xml',
'views/report_paperformat_views.xml',
'views/onboarding_views.xml',
'security/ir.model.access.csv',
],
'demo': [
'data/res_company_demo.xml',
'data/res_users_demo.xml',
'data/res_partner_bank_demo.xml',
'data/res_currency_rate_demo.xml',
'data/res_bank_demo.xml',
'data/res_partner_demo.xml',
'data/res_partner_image_demo.xml',
],
'test': [],
'installable': True,
'auto_install': True,
'post_init_hook': 'post_init',
}
|
py | 1a3658ed2e9c9cde54e99100ff259203a3aae49b | """
Binary Ninja plugin that imports a capa report,
produced via `capa --json /path/to/sample`,
into the current database.
It will mark up functions with their capa matches, like:
; capa: print debug messages (host-interaction/log/debug/write-event)
; capa: delete service (host-interaction/service/delete)
; Attributes: bp-based frame
public UninstallService
UninstallService proc near
...
To use, invoke from the Binary Ninja Tools menu, or from the command-palette.
Adapted for Binary Ninja by @psifertex
This script will verify that the report matches the workspace.
Check the log window for any errors, and/or the summary of changes.
Derived from: https://github.com/fireeye/capa/blob/master/scripts/import-to-ida.py
"""
import os
import json
from binaryninja import *
def append_func_cmt(bv, va, cmt):
"""
add the given comment to the given function,
if it doesn't already exist.
"""
func = bv.get_function_at(va)
if not func:
raise ValueError("not a function")
if cmt in func.comment:
return
func.comment = func.comment + "\n" + cmt
def load_analysis(bv):
shortname = os.path.splitext(os.path.basename(bv.file.filename))[0]
dirname = os.path.dirname(bv.file.filename)
log_info(f"dirname: {dirname}\nshortname: {shortname}\n")
if os.access(os.path.join(dirname, shortname + ".js"), os.R_OK):
path = os.path.join(dirname, shortname + ".js")
elif os.access(os.path.join(dirname, shortname + ".json"), os.R_OK):
path = os.path.join(dirname, shortname + ".json")
else:
path = interaction.get_open_filename_input("capa report:", "JSON (*.js *.json);;All Files (*)")
if not path or not os.access(path, os.R_OK):
log_error("Invalid filename.")
return 0
log_info("Using capa file %s" % path)
with open(path, "rb") as f:
doc = json.loads(f.read().decode("utf-8"))
if "meta" not in doc or "rules" not in doc:
log_error("doesn't appear to be a capa report")
return -1
a = doc["meta"]["sample"]["md5"].lower()
md5 = Transform["MD5"]
rawhex = Transform["RawHex"]
b = rawhex.encode(md5.encode(bv.parent_view.read(bv.parent_view.start, bv.parent_view.end))).decode("utf-8")
if not a == b:
log_error("sample mismatch")
return -2
rows = []
for rule in doc["rules"].values():
if rule["meta"].get("lib"):
continue
if rule["meta"].get("capa/subscope"):
continue
if rule["meta"]["scope"] != "function":
continue
name = rule["meta"]["name"]
ns = rule["meta"].get("namespace", "")
for va in rule["matches"].keys():
va = int(va)
rows.append((ns, name, va))
# order by (namespace, name) so that like things show up together
rows = sorted(rows)
for ns, name, va in rows:
if ns:
cmt = "%s (%s)" % (name, ns)
else:
cmt = "%s" % (name,)
log_info("0x%x: %s" % (va, cmt))
try:
# message will look something like:
#
# capa: delete service (host-interaction/service/delete)
append_func_cmt(bv, va, "capa: " + cmt)
except ValueError:
continue
log_info("ok")
PluginCommand.register("Load capa file", "Loads an analysis file from capa", load_analysis)
|
py | 1a36593a2f73c30dc001d6ae35c9f5d12616b6b8 | """
Routing for zachsite app.
"""
from django.urls import path
from django.contrib.auth import views as auth_views
from projects.models import Project
from . import views
project_list = Project.objects.all().filter(active=True).order_by("title")
extra_context = {
'projectList': project_list,
'projectLen': str(len(project_list))
}
urlpatterns = [
path('', views.index, name='index'),
    path(
        'accounts/login/',
        auth_views.LoginView.as_view(
            template_name='zachsite/login.html',
            extra_context=extra_context
        ),
        name="login"
    ),
    path(
        'accounts/logout/',
        auth_views.LogoutView.as_view(
            template_name='zachsite/logout.html',
            extra_context=extra_context
        ),
        name="logout"
    ),
    path(
        'accounts/password_change/',
        auth_views.PasswordChangeView.as_view(
            template_name='zachsite/pass_change_form.html',
            extra_context=extra_context
        ),
        name="password_change"
    ),
    path(
        'accounts/password_change/done/',
        auth_views.PasswordChangeDoneView.as_view(
            template_name='zachsite/pass_change_done.html',
            extra_context=extra_context
        ),
        name="password_change_done"
    ),
    path(
        'accounts/password_reset/',
        auth_views.PasswordResetView.as_view(
            template_name='zachsite/pass_reset_form.html',
            extra_context=extra_context
        ),
        name="password_reset"
    ),
    path(
        'accounts/password_reset/done/',
        auth_views.PasswordResetDoneView.as_view(
            template_name='zachsite/pass_reset_done.html',
            extra_context=extra_context
        ),
        name="password_reset_done"
    ),
    path(
        'accounts/reset/',
        auth_views.PasswordResetConfirmView.as_view(
            template_name='zachsite/pass_reset_confirm.html',
            extra_context=extra_context
        ),
        name="password_reset_confirm"
    ),
    path(
        'accounts/reset/done/',
        auth_views.PasswordResetCompleteView.as_view(
            template_name='zachsite/pass_change_done.html',
            extra_context=extra_context
        ),
        name="password_reset_complete"
    )
|
py | 1a3659dde12a17bd2ed9763a7a39ade7bfc53c7b | # Copyright 2017: GoDaddy Inc.
import collections
import datetime
import logging
import random
import threading
import futurist
import futurist.rejection
import monotonic
import requests
from netmet.utils import ping
from netmet.utils import pusher
from netmet.utils import secure
LOG = logging.getLogger(__name__)
class Collector(object):
pinger_failed_msg = "Pinger failed to ping"
def __init__(self, netmet_server, client_host, tasks):
self.client_host = client_host
self.tasks = tasks
self.pusher = None
if netmet_server:
netmet_server = netmet_server.rstrip("/")
self.pusher = pusher.Pusher("%s/api/v1/metrics" % netmet_server,
extra_headers=secure.gen_hmac_headers)
self.lock = threading.Lock()
self.queue = collections.deque()
self.death = threading.Event()
self.started = False
self.main_thread = None
self.processing_thread = None
def gen_periodic_ping(self, task):
ip = (task["north-south"]["dest"] if "north-south" in task else
task["east-west"]["dest"]["ip"])
settings = task[task.keys()[0]]["settings"]
pinger = ping.Ping(ip, timeout=settings["timeout"],
packet_size=settings["packet_size"])
def ping_():
try:
result = pinger.ping()
metric = {
"client_src": self.client_host,
"protocol": "icmp",
"timestamp": result["timestamp"],
"latency": result["rtt"],
"packet_size": result["packet_size"],
"lost": int(bool(result["ret_code"])),
"transmitted": int(not bool(result["ret_code"])),
"ret_code": result["ret_code"]
}
if "north-south" in task:
metric["dest"] = task["north-south"]["dest"]
self.queue.append({"north-south": metric})
else:
metric["client_dest"] = task["east-west"]["dest"]
self.queue.append({"east-west": metric})
except Exception:
LOG.exception(self.pinger_failed_msg)
return ping_
def gen_periodic_http_ping(self, task):
def http_ping():
try:
started_at = monotonic.monotonic()
metric = {
"client_src": self.client_host,
"protocol": "http",
"timestamp": datetime.datetime.now().isoformat(),
"packet_size": 0,
"latency": 0,
"lost": 1,
"transmitted": 0,
"ret_code": 504
}
settings = task[task.keys()[0]]["settings"]
if "east-west" in task:
dest = task["east-west"]["dest"]
metric["client_dest"] = dest
dest = "http://%s:%s" % (dest["host"], dest["port"])
else:
dest = task["north-south"]["dest"]
metric["dest"] = dest
r = requests.get(dest, timeout=settings["timeout"])
metric.update({
"latency": (monotonic.monotonic() - started_at) * 1000,
"packet_size": len(r.content),
"lost": int(r.status_code != 200),
"transmitted": int(r.status_code == 200),
"ret_code": r.status_code
})
except requests.exceptions.ConnectionError:
pass
except Exception:
LOG.exception("Collector failed to call another clinet API")
finally:
type_ = "east-west" if "east-west" in task else "north-south"
self.queue.append({type_: metric})
return http_ping
def process_results(self):
while self.queue or not self.death.is_set():
while self.queue:
item = self.queue.popleft()
if self.pusher:
self.pusher.add(item) # push to netmet server data
else:
print(item) # netmet client standalone mode
self.death.wait(0.1)
def _job_per_period(self, callables, period):
def helper():
delay = period / float(len(callables))
pool = futurist.ThreadPoolExecutor(
max_workers=50,
check_and_reject=futurist.rejection.reject_when_reached(50))
with pool:
while not self.death.is_set():
for item in callables:
while not self.death.is_set():
try:
pool.submit(item)
break
except futurist.RejectedSubmission:
LOG.warning("Collector: Feed me! Mre threads!")
self.death.wait(delay)
self.death.wait(delay)
# up to 0.1 second delay between runs of tasks
self.death.wait(random.random() * min(delay, 1) / 10.0)
return helper
def _job(self):
generators = {
"icmp": self.gen_periodic_ping,
"http": self.gen_periodic_http_ping
}
period_tasks = {}
for task in self.tasks:
task_data = task.values()[0]
period_ = task_data["settings"]["period"]
protocol = task_data["protocol"]
period_tasks.setdefault(period_, [])
if protocol in generators:
period_tasks[period_].append(generators[protocol](task))
else:
LOG.warning("Allowed protocols are: %s" % generators.keys())
pool = futurist.ThreadPoolExecutor(max_workers=len(period_tasks))
with pool:
min_period = min(period_tasks)
min_lag = float(min_period) / len(period_tasks[min_period])
lag = min(min_lag / len(period_tasks), 1)
LOG.info(period_tasks)
for period, callables in period_tasks.iteritems():
pool.submit(self._job_per_period(callables, period))
self.death.wait(lag)
def start(self):
with self.lock:
if not self.started:
self.started = True
self.death = threading.Event()
else:
return
if self.pusher:
self.pusher.start()
self.main_thread = threading.Thread(target=self._job)
self.main_thread.daemon = True
self.main_thread.start()
self.processing_thread = threading.Thread(target=self.process_results)
        self.processing_thread.daemon = True
self.processing_thread.start()
return True
def stop(self):
with self.lock:
if self.started and not self.death.is_set():
self.death.set()
self.main_thread.join()
self.processing_thread.join()
if self.pusher:
self.pusher.stop()
self.started = False
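# Illustrative task layout inferred from the generators above (comment-only;
# every field value is an assumption):
#   task = {"north-south": {"dest": "8.8.8.8",
#                           "protocol": "icmp",
#                           "settings": {"period": 10, "timeout": 1,
#                                        "packet_size": 55}}}
#   collector = Collector(netmet_server=None, client_host={"host": "node1"},
#                         tasks=[task])
#   collector.start()   # standalone mode: metrics are printed instead of pushed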
|
py | 1a365b4b9790f0b774c76fa0b08956dc557bb006 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 02:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hours', '0024_merge_20160919_1445'),
]
operations = [
migrations.AlterField(
model_name='timecard',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='timecards', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='timecardobject',
name='timecard',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='timecardobjects', to='hours.Timecard'),
),
]
|
py | 1a365c9723af25fb654628d69b08ab8f01358411 | """
This module provides means of connecting to a QCoDeS database file and
initialising it. Note that connecting/initialisation take into account
database version and possibly perform database upgrades.
"""
import io
import sqlite3
import sys
from contextlib import contextmanager
from os.path import expanduser, normpath
from typing import Union, Iterator, Tuple, Optional
import numpy as np
from numpy import ndarray
from qcodes.dataset.sqlite.connection import ConnectionPlus
from qcodes.dataset.sqlite.db_upgrades import _latest_available_version, \
get_user_version, perform_db_upgrade
from qcodes.dataset.sqlite.initial_schema import init_db
import qcodes.config
from qcodes.utils.types import complex_types, complex_type_union
# utility function to allow sqlite/numpy type
def _adapt_array(arr: ndarray) -> sqlite3.Binary:
"""
See this:
https://stackoverflow.com/questions/3425320/sqlite3-programmingerror-you-must-not-use-8-bit-bytestrings-unless-you-use-a-te
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def _convert_array(text: bytes) -> ndarray:
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
def _convert_complex(text: bytes) -> complex_type_union:
out = io.BytesIO(text)
out.seek(0)
return np.load(out)[0]
this_session_default_encoding = sys.getdefaultencoding()
def _convert_numeric(value: bytes) -> Union[float, int, str]:
"""
This is a converter for sqlite3 'numeric' type class.
This converter is capable of deducting whether a number is a float or an
int.
    Note that sqlite3 allows saving data to columns even if their type is not
    compatible with the table type class (for example, it is possible to save
    integers into 'text' columns). Due to this fact, and for reasons of
    flexibility, the numeric converter is also made capable of handling
    strings. An obvious exception to this is 'nan' (case insensitive), which
    gets converted to `np.nan`. Another exception is 'inf', which
    gets converted to `np.inf`.
"""
try:
# First, try to convert bytes to float
numeric = float(value)
except ValueError as e:
# If an exception has been raised, we first need to find out
# if the reason was the conversion to float, and, if so, we are sure
# that we need to return a string
if "could not convert string to float" in str(e):
return str(value, encoding=this_session_default_encoding)
else:
# otherwise, the exception is forwarded up the stack
raise e
# If that worked, e.g. did not raise an exception, then we check if the
# outcome is 'nan'
if np.isnan(numeric):
return numeric
    # Then we check if the outcome is 'inf', which includes +inf and -inf
if np.isinf(numeric):
return numeric
# If it is not 'nan' and not 'inf', then we need to see if the value is
# really an integer or with floating point digits
numeric_int = int(numeric)
if numeric != numeric_int:
return numeric
else:
return numeric_int
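# Illustrative conversions under the rules above (comment-only; inputs mimic
# the raw bytes sqlite3 passes to a registered converter):
#   _convert_numeric(b"42")    -> 42 (int)
#   _convert_numeric(b"1.5")   -> 1.5 (float)
#   _convert_numeric(b"nan")   -> nan (float)
#   _convert_numeric(b"hello") -> 'hello' (str)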
def _adapt_float(fl: float) -> Union[float, str]:
if np.isnan(fl):
return "nan"
return float(fl)
def _adapt_complex(value: complex_type_union) -> sqlite3.Binary:
out = io.BytesIO()
np.save(out, np.array([value]))
out.seek(0)
return sqlite3.Binary(out.read())
def connect(name: str, debug: bool = False,
version: int = -1) -> ConnectionPlus:
"""
Connect or create database. If debug the queries will be echoed back.
This function takes care of registering the numpy/sqlite type
converters that we need.
Args:
name: name or path to the sqlite file
debug: whether or not to turn on tracing
version: which version to create. We count from 0. -1 means 'latest'.
Should always be left at -1 except when testing.
Returns:
conn: connection object to the database (note, it is
`ConnectionPlus`, not `sqlite3.Connection`
"""
# register numpy->binary(TEXT) adapter
# the typing here is ignored due to what we think is a flaw in typeshed
# see https://github.com/python/typeshed/issues/2429
sqlite3.register_adapter(np.ndarray, _adapt_array)
# register binary(TEXT) -> numpy converter
# for some reasons mypy complains about this
sqlite3.register_converter("array", _convert_array)
sqlite3_conn = sqlite3.connect(name, detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=True)
conn = ConnectionPlus(sqlite3_conn)
latest_supported_version = _latest_available_version()
db_version = get_user_version(conn)
if db_version > latest_supported_version:
raise RuntimeError(f"Database {name} is version {db_version} but this "
f"version of QCoDeS supports up to "
f"version {latest_supported_version}")
# sqlite3 options
conn.row_factory = sqlite3.Row
# Make sure numpy ints and floats types are inserted properly
for numpy_int in [
np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64
]:
sqlite3.register_adapter(numpy_int, int)
sqlite3.register_converter("numeric", _convert_numeric)
for numpy_float in [np.float, np.float16, np.float32, np.float64]:
sqlite3.register_adapter(numpy_float, _adapt_float)
for complex_type in complex_types:
sqlite3.register_adapter(complex_type, _adapt_complex)
sqlite3.register_converter("complex", _convert_complex)
if debug:
conn.set_trace_callback(print)
init_db(conn)
perform_db_upgrade(conn, version=version)
return conn
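# Usage sketch (comment-only; the file name is hypothetical):
#   conn = connect('experiments.db')
#   cur = conn.cursor()
#   cur.execute('SELECT 1')
#   conn.close()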
def get_db_version_and_newest_available_version(path_to_db: str) -> Tuple[int,
int]:
"""
Connect to a DB without performing any upgrades and get the version of
that database file along with the newest available version (the one that
a normal "connect" will automatically upgrade to)
Args:
path_to_db: the absolute path to the DB file
Returns:
A tuple of (db_version, latest_available_version)
"""
conn = connect(path_to_db, version=0)
db_version = get_user_version(conn)
return db_version, _latest_available_version()
def get_DB_location() -> str:
return normpath(expanduser(qcodes.config["core"]["db_location"]))
def get_DB_debug() -> bool:
return bool(qcodes.config["core"]["db_debug"])
def initialise_database(journal_mode: Optional[str] = 'WAL') -> None:
"""
Initialise a database in the location specified by the config object
and set ``atomic commit and rollback mode`` of the db. The db is created
with the latest supported version. If the database already exists the
``atomic commit and rollback mode`` is set and the database is upgraded
to the latest version.
Args:
journal_mode: Which `journal_mode` should be used for atomic commit and rollback.
Options are DELETE, TRUNCATE, PERSIST, MEMORY, WAL and OFF. If set to None
no changes are made.
"""
# calling connect performs all the needed actions to create and upgrade
# the db to the latest version.
conn = connect(get_DB_location(), get_DB_debug())
if journal_mode is not None:
set_journal_mode(conn, journal_mode)
conn.close()
del conn
def set_journal_mode(conn: ConnectionPlus, journal_mode: str) -> None:
"""
Set the ``atomic commit and rollback mode`` of the sqlite database.
See https://www.sqlite.org/pragma.html#pragma_journal_mode for details.
Args:
conn: Connection to the database.
journal_mode: Which `journal_mode` should be used for atomic commit and rollback.
Options are DELETE, TRUNCATE, PERSIST, MEMORY, WAL and OFF. If set to None
no changes are made.
"""
valid_journal_modes = ["DELETE", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "OFF"]
if journal_mode not in valid_journal_modes:
raise RuntimeError(f"Invalid journal_mode {journal_mode} "
f"Valid modes are {valid_journal_modes}")
query = f"PRAGMA journal_mode={journal_mode};"
cursor = conn.cursor()
cursor.execute(query)
def initialise_or_create_database_at(db_file_with_abs_path: str,
journal_mode: Optional[str] = 'WAL') -> None:
"""
This function sets up QCoDeS to refer to the given database file. If the
database file does not exist, it will be initiated.
Args:
db_file_with_abs_path
Database file name with absolute path, for example
``C:\\mydata\\majorana_experiments.db``
journal_mode: Which `journal_mode` should be used for atomic commit and rollback.
Options are DELETE, TRUNCATE, PERSIST, MEMORY, WAL and OFF. If set to None
no changes are made.
"""
qcodes.config.core.db_location = db_file_with_abs_path
initialise_database(journal_mode)
@contextmanager
def initialised_database_at(db_file_with_abs_path: str) -> Iterator[None]:
"""
Initializes or creates a database and restores the 'db_location' afterwards.
Args:
db_file_with_abs_path
Database file name with absolute path, for example
``C:\\mydata\\majorana_experiments.db``
"""
db_location = qcodes.config["core"]["db_location"]
try:
initialise_or_create_database_at(db_file_with_abs_path)
yield
finally:
qcodes.config["core"]["db_location"] = db_location
def conn_from_dbpath_or_conn(conn: Optional[ConnectionPlus],
path_to_db: Optional[str]) \
-> ConnectionPlus:
"""
A small helper function to abstract the logic needed for functions
that take either a `ConnectionPlus` or the path to a db file.
If neither is given this will fall back to the default db location.
It is an error to supply both.
Args:
conn: A ConnectionPlus object pointing to a sqlite database
path_to_db: The path to a db file.
Returns:
A `ConnectionPlus` object
"""
if path_to_db is not None and conn is not None:
raise ValueError('Received BOTH conn and path_to_db. Please '
'provide only one or the other.')
if conn is None and path_to_db is None:
path_to_db = get_DB_location()
if conn is None and path_to_db is not None:
conn = connect(path_to_db, get_DB_debug())
elif conn is not None:
conn = conn
else:
# this should be impossible but left here to keep mypy happy.
raise RuntimeError("Could not obtain a connection from"
"supplied information.")
return conn
|
py | 1a365cfbd36731afe885734a5a4ac8d0af70ac76 | """ foxtail/clinics/tests/test_models.py """
import pytest
from .factories import ClinicFactory
pytestmark = pytest.mark.django_db
def test_get_organization():
clinic = ClinicFactory()
org = clinic.organization
assert clinic.get_organization() == org.name
|
py | 1a365d6f1716647269d35daa48704e0efbbd7fd5 | from itertools import chain
import multiprocessing as mp
try:
from multiprocessing import SimpleQueue as MPQueue
except ImportError:
from multiprocessing.queues import SimpleQueue as MPQueue
import os
import threading
from ddtrace import Span
from ddtrace import tracer
from ddtrace.internal import _rand
from ddtrace.internal import forksafe
from ddtrace.internal.compat import Queue
def test_random():
m = set()
for i in range(0, 2 ** 16):
n = _rand.rand64bits()
assert 0 <= n <= 2 ** 64 - 1
assert n not in m
m.add(n)
def test_fork_no_pid_check():
q = MPQueue()
pid = os.fork()
# Generate random numbers in the parent and child processes after forking.
# The child sends back their numbers to the parent where we check to see
# if we get collisions or not.
if pid > 0:
# parent
rns = {_rand.rand64bits() for _ in range(100)}
child_rns = q.get()
assert rns & child_rns == set()
else:
# child
try:
rngs = {_rand.rand64bits() for _ in range(100)}
q.put(rngs)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def test_fork_pid_check():
q = MPQueue()
pid = os.fork()
# Generate random numbers in the parent and child processes after forking.
# The child sends back their numbers to the parent where we check to see
# if we get collisions or not.
if pid > 0:
# parent
rns = {_rand.rand64bits() for _ in range(100)}
child_rns = q.get()
assert rns & child_rns == set()
else:
# child
try:
rngs = {_rand.rand64bits() for _ in range(100)}
q.put(rngs)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def _test_multiprocess_target(q):
assert sum((_ is _rand.seed for _ in forksafe._registry)) == 1
q.put([_rand.rand64bits() for _ in range(100)])
def test_multiprocess():
q = MPQueue()
ps = [mp.Process(target=_test_multiprocess_target, args=(q,)) for _ in range(30)]
for p in ps:
p.start()
for p in ps:
p.join()
assert p.exitcode == 0
ids_list = [_rand.rand64bits() for _ in range(1000)]
ids = set(ids_list)
assert len(ids_list) == len(ids), "Collisions found in ids"
while not q.empty():
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids_list) == len(child_ids), "Collisions found in subprocess ids"
assert ids & child_ids == set()
ids = ids | child_ids # accumulate the ids
def _test_threadsafe_target(q):
# Generate a bunch of numbers to try to maximize the chance that
# two threads will be calling rand64bits at the same time.
rngs = [_rand.rand64bits() for _ in range(200000)]
q.put(rngs)
def test_threadsafe():
# Check that the PRNG is thread-safe.
# This obviously won't guarantee thread safety, but it's something
# at least.
# To provide some validation of this method I wrote a slow, unsafe RNG:
#
# state = 4101842887655102017
#
# def bad_random():
# global state
# state ^= state >> 21
# state ^= state << 35
# state ^= state >> 4
# return state * 2685821657736338717
#
# which consistently fails this test.
q = Queue()
ts = [threading.Thread(target=_test_threadsafe_target, args=(q,)) for _ in range(5)]
for t in ts:
t.start()
for t in ts:
t.join()
ids = set()
while not q.empty():
new_ids_list = q.get()
new_ids = set(new_ids_list)
assert len(new_ids) == len(new_ids_list), "Collision found in ids"
assert ids & new_ids == set()
ids = ids | new_ids
assert len(ids) > 0
def test_tracer_usage_fork():
q = MPQueue()
pid = os.fork()
# Similar test to test_fork() above except we use the tracer API.
# In this case we expect to never have collisions.
if pid > 0:
# parent
parent_ids_list = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
)
parent_ids = set(parent_ids_list)
assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
assert parent_ids & child_ids == set()
else:
# child
try:
child_ids = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
)
q.put(child_ids)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def _test_tracer_usage_multiprocess_target(q):
ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(10)]))
q.put(ids_list)
def test_tracer_usage_multiprocess():
q = MPQueue()
# Similar to test_multiprocess(), ensures that no collisions are
# generated between parent and child processes while using
# multiprocessing.
# Note that we have to be wary of the size of the underlying
# pipe in the queue: https://bugs.python.org/msg143081
ps = [mp.Process(target=_test_tracer_usage_multiprocess_target, args=(q,)) for _ in range(30)]
for p in ps:
p.start()
for p in ps:
p.join()
ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)]))
ids = set(ids_list)
assert len(ids) == len(ids_list), "Collisions found in ids"
while not q.empty():
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in subprocess ids"
assert ids & child_ids == set()
ids = ids | child_ids # accumulate the ids
def test_span_api_fork():
q = MPQueue()
pid = os.fork()
if pid > 0:
# parent
parent_ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None) for _ in range(100)]))
parent_ids = set(parent_ids_list)
assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
assert parent_ids & child_ids == set()
else:
# child
try:
child_ids = list(chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None) for _ in range(100)]))
q.put(child_ids)
finally:
os._exit(0)
|
py | 1a365dee9eac7399743d9db877e1a5aa0c6f8a7d | import unittest
class Test(unittest.TestCase):
def test(self):
# docs checkpoint 0
import numpy as np
import openmdao.api as om
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
from openmdao.utils.assert_utils import assert_near_equal
from openaerostruct.utils.testing import assert_check_totals
# Create a dictionary to store options about the mesh
mesh_dict = {"num_y": 7, "num_x": 2, "wing_type": "CRM", "symmetry": True, "num_twist_cp": 5}
# Generate the aerodynamic mesh based on the previous dictionary
mesh, twist_cp = generate_mesh(mesh_dict)
# Create a dictionary with info and options about the aerodynamic
# lifting surface
surface = {
# Wing definition
"name": "wing", # name of the surface
"symmetry": True, # if true, model one half of wing
# reflected across the plane y = 0
"groundplane": True,
"S_ref_type": "wetted", # how we compute the wing area,
# can be 'wetted' or 'projected'
"fem_model_type": "tube",
"twist_cp": twist_cp,
"mesh": mesh,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
"CL0": 0.0, # CL of the surface at alpha=0
"CD0": 0.015, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
"k_lam": 0.05, # percentage of chord with laminar
# flow, used for viscous drag
"t_over_c_cp": np.array([0.15]), # thickness over chord ratio (NACA0015)
"c_max_t": 0.303, # chordwise location of maximum (NACA0015)
# thickness
"with_viscous": True, # if true, compute viscous drag
"with_wave": False, # if true, compute wave drag
}
# Create the OpenMDAO problem
prob = om.Problem()
# Create an independent variable component that will supply the flow
# conditions to the problem.
indep_var_comp = om.IndepVarComp()
indep_var_comp.add_output("v", val=248.136, units="m/s")
indep_var_comp.add_output("alpha", val=5.0, units="deg")
indep_var_comp.add_output("Mach_number", val=0.84)
indep_var_comp.add_output("re", val=1.0e6, units="1/m")
indep_var_comp.add_output("rho", val=0.38, units="kg/m**3")
indep_var_comp.add_output("cg", val=np.zeros((3)), units="m")
indep_var_comp.add_output("height_agl", val=8000.0, units="m")
# Add this IndepVarComp to the problem model
prob.model.add_subsystem("prob_vars", indep_var_comp, promotes=["*"])
# Create and add a group that handles the geometry for the
# aerodynamic lifting surface
geom_group = Geometry(surface=surface)
prob.model.add_subsystem(surface["name"], geom_group)
# Create the aero point group, which contains the actual aerodynamic
# analyses
aero_group = AeroPoint(surfaces=[surface])
point_name = "aero_point_0"
prob.model.add_subsystem(
point_name, aero_group, promotes_inputs=["v", "alpha", "Mach_number", "re", "rho", "cg", "height_agl"]
)
name = surface["name"]
# Connect the mesh from the geometry component to the analysis point
prob.model.connect(name + ".mesh", point_name + "." + name + ".def_mesh")
# Perform the connections with the modified names within the
# 'aero_states' group.
prob.model.connect(name + ".mesh", point_name + ".aero_states." + name + "_def_mesh")
prob.model.connect(name + ".t_over_c", point_name + "." + name + "_perf." + "t_over_c")
# Import the Scipy Optimizer and set the driver of the problem to use
# it, which defaults to an SLSQP optimization method
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options["tol"] = 1e-9
recorder = om.SqliteRecorder("aero.db")
prob.driver.add_recorder(recorder)
prob.driver.recording_options["record_derivatives"] = True
prob.driver.recording_options["includes"] = ["*"]
# Setup problem and add design variables, constraint, and objective
prob.model.add_design_var("height_agl", lower=10.0, upper=8000.0)
prob.model.add_design_var("wing.twist_cp", lower=-10.0, upper=15.0)
prob.model.add_constraint(point_name + ".wing_perf.CL", equals=0.5)
prob.model.add_objective(point_name + ".wing_perf.CD", scaler=1e4)
# Set up and run the optimization problem
prob.setup()
prob.run_driver()
# docs checkpoint 1
assert_near_equal(prob["aero_point_0.wing_perf.CD"][0], 0.033389699871650073, 1e-6)
assert_near_equal(prob["aero_point_0.wing_perf.CL"][0], 0.5, 1e-6)
assert_near_equal(prob["aero_point_0.CM"][1], -1.7885550372372376, 1e-6)
prob["height_agl"] = 10.0
prob.run_driver()
assert_near_equal(prob["aero_point_0.wing_perf.CD"][0], 0.029145613948518813, 1e-6)
assert_near_equal(prob["aero_point_0.wing_perf.CL"][0], 0.5, 1e-6)
assert_near_equal(prob["aero_point_0.CM"][1], -1.7719184423417516, 1e-6)
totals = prob.check_totals(
of=["aero_point_0.wing_perf.CD", "aero_point_0.wing_perf.CL"],
wrt=["wing.twist_cp", "height_agl"],
compact_print=True,
out_stream=None,
)
assert_check_totals(totals, atol=1e-2, rtol=1e-5)
if __name__ == "__main__":
unittest.main()
|
py | 1a36600976e4043c10dfea7d318808c29fd056ee | import json
from django.core.management.utils import get_random_secret_key
setting_json = json.load(open('settings.json', 'r'))
setting_json['secret_key'] = get_random_secret_key()
setting_file = open('dataexplore/settings.py', 'r+')
file_content = setting_file.read()
setting_file.close()
file_content = file_content.replace('SECRET_KEY_GENERATED', setting_json['secret_key'])
file_content = file_content.replace('HOST_NAME', setting_json['host_name'])
file_content = file_content.replace('DB_NAME', setting_json['database']['NAME'])
file_content = file_content.replace('DB_USER', setting_json['database']['USER'])
file_content = file_content.replace('DB_PWD', setting_json['database']['PASSWORD'])
file_content = file_content.replace('DB_HOST', setting_json['database']['HOST'])
file_content = file_content.replace('DB_PORT', setting_json['database']['PORT'])
with open('dataexplore/settings.py', 'w') as setting_file:
setting_file.write(file_content)
setting_file.close()
|
py | 1a366026bb992605ab45c9b48bf4e4f970138a3e | from unit_test_common import execute_csv2_command, initialize_csv2_request, ut_id, sanity_commands
from sys import argv
# lno: CV - error code identifier.
def main(gvar):
if not gvar:
gvar = {}
if len(argv) > 1:
initialize_csv2_request(gvar, selections=argv[1])
else:
initialize_csv2_request(gvar)
# 01 - 14
sanity_commands(gvar, 'cloud', 'metadata-list')
# 15
execute_csv2_command(
gvar, 1, None, 'The following command line arguments were invalid: metadata-mime-type',
['cloud', 'metadata-list', '-mmt', 'invalid-unit-test', '-g', ut_id(gvar, 'clg1'), '-su', ut_id(gvar, 'clu3')]
)
# 16
execute_csv2_command(
gvar, 0, None, None,
['cloud', 'metadata-list', '-NV', '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata', expected_columns={'Group', 'Cloud', 'Metadata Filename', 'Enabled', 'Priority', 'MIME Type'}
)
# 17
execute_csv2_command(
gvar, 0, None, 'Rows: 0',
['cloud', 'metadata-list', '--cloud-name', 'valid-unit-test', '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata'
)
# 18
execute_csv2_command(
gvar, 0, None, 'Rows: 0',
['cloud', 'metadata-list', '--metadata-name', 'valid-unit-test', '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata'
)
# 19
execute_csv2_command(
gvar, 0, None, 'Server: unit-test, Active User: {}, Active Group: {}'.format(ut_id(gvar, 'clu3'), ut_id(gvar, 'clg1')),
['cloud', 'metadata-list', '--cloud-name', ut_id(gvar, 'clc2'), '--metadata-name', ut_id(gvar, 'clm2'), '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata'
)
# 20
execute_csv2_command(
gvar, 0, None, 'Rows: 1',
['cloud', 'metadata-list', '--cloud-name', ut_id(gvar, 'clc2'), '--metadata-name', ut_id(gvar, 'clm2'), '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata'
)
# 21
execute_csv2_command(
gvar, 0, None, 'cloud metadata-list, 1. Clouds/Metadata: keys=group_name,cloud_name,metadata_name, columns=enabled,priority,mime_type',
['cloud', 'metadata-list', '--view-columns', '-su', ut_id(gvar, 'clu3')]
)
if __name__ == "__main__":
main(None)
|
py | 1a366047800e09400701c74c8d1f5661e8fe4f93 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.compat import expanduser
from pip.download import path_to_url, unpack_url
from pip.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks,
)
from pip.utils.ui import open_spinner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir, format_control):
"""Create a wheel cache.
:param cache_dir: The root of the cache.
:param format_control: A pip.index.FormatControl object to limit
binaries being read from the cache.
"""
self._cache_dir = expanduser(cache_dir) if cache_dir else None
self._format_control = format_control
def cached_wheel(self, link, package_name):
return cached_wheel(
self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
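# Illustrative sketch (hypothetical values, not part of the original module): for
# a cache_dir of "/home/user/.cache/pip" and a key URL whose sha224 hex digest
# starts with "ab12cd", _cache_for_link() would return
#     /home/user/.cache/pip/wheels/ab/12/cd/<remaining 50 hex characters>
# i.e. three two-character slices of the digest followed by the rest of it.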
def cached_wheel(cache_dir, link, format_control, package_name):
if not cache_dir:
return link
if not link:
return link
if link.is_wheel:
return link
if not link.is_artifact:
return link
if not package_name:
return link
canonical_name = canonicalize_name(package_name)
formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
if "binary" not in formats:
return link
root = _cache_for_link(cache_dir, link)
try:
wheel_names = os.listdir(root)
except OSError as e:
if e.errno in {errno.ENOENT, errno.ENOTDIR}:
return link
raise
candidates = []
for wheel_name in wheel_names:
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported():
# Built for a different python/arch/etc
continue
candidates.append((wheel.support_index_min(), wheel_name))
if not candidates:
return link
candidates.sort()
path = os.path.join(root, candidates[0][1])
return pip.index.Link(path_to_url(path))
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
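# Illustrative sketch (the interpreter path is hypothetical): a script whose
# first line is
#     #!python
# would be rewritten in place by fix_script() so that line becomes, for example,
#     #!/usr/bin/python3
# (i.e. b'#!' + sys.executable), with the rest of the file left untouched.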
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
# get the entry points and then the script names
entry_points = pkg_resources.EntryPoint.parse_map(data)
console = entry_points.get('console_scripts', {})
gui = entry_points.get('gui_scripts', {})
def _split_ep(s):
"""get the string representation of EntryPoint, remove space and split
on '='"""
return str(s).replace(" ", "").split("=")
# convert the EntryPoint objects into strings with module:function
console = dict(_split_ep(v) for v in console.values())
gui = dict(_split_ep(v) for v in gui.values())
return console, gui
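# Illustrative sketch (hypothetical project and module names): an
# entry_points.txt containing
#     [console_scripts]
#     mytool = mypkg.cli:main
# would make get_entrypoints() return
#     ({'mytool': 'mypkg.cli:main'}, {})
# i.e. one console script and no GUI scripts.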
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False, prefix=None):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated,
prefix=prefix,
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return os.path.relpath(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
canonicalize_name(s).startswith(
canonicalize_name(req.name))):
assert not info_dir, ('Multiple .dist-info directories: ' +
destsubdir + ', ' +
', '.join(info_dir))
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
if entry.suffix is None:
raise InstallationError(
"Invalid script entry point: %s for req: %s - A callable "
"suffix is required. Cf https://packaging.python.org/en/"
"latest/distributing.html#console-scripts for more "
"information." % (entry, req)
)
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = r"""# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
# Record pip as the installer
installer = os.path.join(info_dir[0], 'INSTALLER')
temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
with open(temp_installer, 'wb') as installer_file:
installer_file.write(b'pip\n')
shutil.move(temp_installer, installer)
generated.append(installer)
# Record details of all files installed
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((normpath(f, lib_dir), h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
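# Illustrative sketch (hypothetical metadata): for an extracted wheel whose WHEEL
# file contains the line
#     Wheel-Version: 1.0
# wheel_version() would return (1, 0); if the metadata cannot be parsed, it
# returns False, as handled above.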
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1), and warn when
    installing a version that is only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
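    # Illustrative sketch: parsing the filename
    #     pip-8.1.2-py2.py3-none-any.whl
    # with the regex above yields name 'pip', version '8.1.2',
    # pyversions ['py2', 'py3'], abis ['none'], plats ['any'], and therefore
    # file_tags {('py2', 'none', 'any'), ('py3', 'none', 'any')}.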
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self._cache_root = requirement_set._wheel_cache._cache_dir
self._wheel_dir = requirement_set.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req, output_dir, python_tag=None):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd, python_tag=python_tag):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
pass
# Ignore return, we can't do anything else useful.
self._clean_one(req)
return None
finally:
rmtree(tempd)
def _base_setup_args(self, req):
return [
sys.executable, "-u", '-c',
SETUPTOOLS_SHIM % req.setup_py
] + list(self.global_options)
def __build_one(self, req, tempd, python_tag=None):
base_args = self._base_setup_args(req)
spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
if python_tag is not None:
wheel_args += ["--python-tag", python_tag]
try:
call_subprocess(wheel_args, cwd=req.setup_py_dir,
show_stdout=False, spinner=spinner)
return True
except:
spinner.finish("error")
logger.error('Failed building wheel for %s', req.name)
return False
def _clean_one(self, req):
base_args = self._base_setup_args(req)
logger.info('Running setup.py clean for %s', req.name)
clean_args = base_args + ['clean', '--all']
try:
call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(self, autobuilding=False):
"""Build wheels.
        :param autobuilding: If True, replace the sdist we built from with the
newly built wheel, in preparation for installation.
:return: True if all the wheels built correctly.
"""
assert self._wheel_dir or (autobuilding and self._cache_root)
# unpack sdists and constructs req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name)
elif autobuilding and req.editable:
pass
elif autobuilding and req.link and not req.link.is_artifact:
pass
elif autobuilding and not req.source_dir:
pass
else:
if autobuilding:
link = req.link
base, ext = link.splitext()
if pip.index.egg_info_matches(base, None, link) is None:
# Doesn't look like a package - don't autobuild a wheel
# because we'll have no way to lookup the result sanely
continue
if "binary" not in pip.index.fmt_ctl_formats(
self.finder.format_control,
canonicalize_name(req.name)):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name)
continue
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
python_tag = None
if autobuilding:
python_tag = pep425tags.implementation_tag
output_dir = _cache_for_link(self._cache_root, req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(
req, output_dir,
python_tag=python_tag,
)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.requirement_set.build_dir)
# Update the link for this.
req.link = pip.index.Link(
path_to_url(wheel_file))
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=self.requirement_set.session)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
|
py | 1a36605ac9385b167ba095fe82418085140701ff | # encoding: utf-8
import datetime
import logging
from sqlalchemy.sql import and_, or_
from sqlalchemy import orm, types, Column, Table, ForeignKey
from ckan.common import config
from ckan.model import (
meta,
core,
license as _license,
types as _types,
domain_object,
activity,
extension,
)
import ckan.lib.maintain as maintain
logger = logging.getLogger(__name__)
__all__ = ['Package', 'package_table', 'PackageMember', 'package_member_table',
'PACKAGE_NAME_MAX_LENGTH', 'PACKAGE_NAME_MIN_LENGTH',
'PACKAGE_VERSION_MAX_LENGTH',
]
PACKAGE_NAME_MAX_LENGTH = 100
PACKAGE_NAME_MIN_LENGTH = 2
PACKAGE_VERSION_MAX_LENGTH = 100
# Our Domain Object Tables
package_table = Table('package', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('name', types.Unicode(PACKAGE_NAME_MAX_LENGTH),
nullable=False, unique=True),
Column('title', types.UnicodeText, doc='remove_if_not_provided'),
Column('version', types.Unicode(PACKAGE_VERSION_MAX_LENGTH),
doc='remove_if_not_provided'),
Column('url', types.UnicodeText, doc='remove_if_not_provided'),
Column('author', types.UnicodeText, doc='remove_if_not_provided'),
Column('author_email', types.UnicodeText, doc='remove_if_not_provided'),
Column('maintainer', types.UnicodeText, doc='remove_if_not_provided'),
Column('maintainer_email', types.UnicodeText, doc='remove_if_not_provided'),
Column('notes', types.UnicodeText, doc='remove_if_not_provided'),
Column('license_id', types.UnicodeText, doc='remove_if_not_provided'),
Column('type', types.UnicodeText, default=u'dataset'),
Column('owner_org', types.UnicodeText),
Column('creator_user_id', types.UnicodeText),
Column('metadata_created', types.DateTime, default=datetime.datetime.utcnow),
Column('metadata_modified', types.DateTime, default=datetime.datetime.utcnow),
Column('private', types.Boolean, default=False),
Column('state', types.UnicodeText, default=core.State.ACTIVE),
)
package_member_table = Table(
'package_member',
meta.metadata,
Column('package_id', ForeignKey('package.id'), primary_key=True),
Column('user_id', ForeignKey('user.id'), primary_key = True),
Column('capacity', types.UnicodeText, nullable=False),
Column('modified', types.DateTime, default=datetime.datetime.utcnow),
)
## -------------------
## Mapped classes
class Package(core.StatefulObjectMixin,
domain_object.DomainObject):
text_search_fields = ['name', 'title']
def __init__(self, **kw):
from ckan import model
super(Package, self).__init__(**kw)
@classmethod
def search_by_name(cls, text_query):
text_query = text_query
return meta.Session.query(cls).filter(cls.name.contains(text_query.lower()))
@classmethod
def get(cls, reference, for_update=False):
'''Returns a package object referenced by its id or name.'''
if not reference:
return None
q = meta.Session.query(cls)
if for_update:
q = q.with_for_update()
pkg = q.get(reference)
        if pkg is None:
pkg = cls.by_name(reference, for_update=for_update)
return pkg
# Todo: Make sure package names can't be changed to look like package IDs?
@property
def resources(self):
return [resource for resource in
self.resources_all
if resource.state != 'deleted']
def related_packages(self):
return [self]
def add_resource(self, url, format=u'', description=u'', hash=u'', **kw):
from ckan.model import resource
self.resources_all.append(resource.Resource(
package_id=self.id,
url=url,
format=format,
description=description,
hash=hash,
**kw)
)
def add_tag(self, tag):
import ckan.model as model
if tag in self.get_tags(tag.vocabulary):
return
else:
package_tag = model.PackageTag(self, tag)
meta.Session.add(package_tag)
def add_tags(self, tags):
for tag in tags:
self.add_tag(tag)
def add_tag_by_name(self, tag_name, vocab=None, autoflush=True):
"""Add a tag with the given name to this package's tags.
By default the given tag_name will be searched for among the free tags
(tags which do not belong to any vocabulary) only. If the optional
argument `vocab` is given then the named vocab will be searched for the
tag name instead.
If no tag with the given name is found, one will be created. If the
optional argument vocab is given and there is no tag with the given
name in the given vocabulary, then a new tag will be created and added
to the vocabulary.
"""
from ckan.model.tag import Tag
if not tag_name:
return
# Get the named tag.
tag = Tag.by_name(tag_name, vocab=vocab, autoflush=autoflush)
if not tag:
# Tag doesn't exist yet, make a new one.
if vocab:
tag = Tag(name=tag_name, vocabulary_id=vocab.id)
else:
tag = Tag(name=tag_name)
assert tag is not None
self.add_tag(tag)
def get_tags(self, vocab=None):
"""Return a sorted list of this package's tags
Tags are sorted by their names.
"""
import ckan.model as model
query = meta.Session.query(model.Tag)
query = query.join(model.PackageTag)
query = query.filter(model.PackageTag.tag_id == model.Tag.id)
query = query.filter(model.PackageTag.package_id == self.id)
query = query.filter(model.PackageTag.state == 'active')
if vocab:
query = query.filter(model.Tag.vocabulary_id == vocab.id)
else:
query = query.filter(model.Tag.vocabulary_id == None)
query = query.order_by(model.Tag.name)
tags = query.all()
return tags
def remove_tag(self, tag):
import ckan.model as model
query = meta.Session.query(model.PackageTag)
query = query.filter(model.PackageTag.package_id == self.id)
query = query.filter(model.PackageTag.tag_id == tag.id)
package_tag = query.one()
package_tag.delete()
meta.Session.commit()
def isopen(self):
if self.license and self.license.isopen():
return True
return False
def get_average_rating(self):
total = 0
for rating in self.ratings:
total += rating.rating
if total == 0:
return None
else:
return total / len(self.ratings)
def as_dict(self, ref_package_by='name', ref_group_by='name'):
_dict = domain_object.DomainObject.as_dict(self)
# Set 'license' in _dict to cater for old clients.
# Todo: Remove from Version 2?
_dict['license'] = self.license.title if self.license else _dict.get('license_id', '')
_dict['isopen'] = self.isopen()
tags = [tag.name for tag in self.get_tags()]
tags.sort() # so it is determinable
_dict['tags'] = tags
groups = [getattr(group, ref_group_by) for group in self.get_groups()]
groups.sort()
_dict['groups'] = groups
_dict['extras'] = {key: value for key, value in self.extras.items()}
_dict['ratings_average'] = self.get_average_rating()
_dict['ratings_count'] = len(self.ratings)
_dict['resources'] = [res.as_dict(core_columns_only=False) \
for res in self.resources]
site_url = config.get('ckan.site_url', None)
if site_url:
_dict['ckan_url'] = '%s/dataset/%s' % (site_url, self.name)
_dict['relationships'] = [rel.as_dict(self, ref_package_by=ref_package_by) for rel in self.get_relationships()]
_dict['metadata_modified'] = self.metadata_modified.isoformat() \
if self.metadata_modified else None
_dict['metadata_created'] = self.metadata_created.isoformat() \
if self.metadata_created else None
import ckan.lib.helpers as h
_dict['notes_rendered'] = h.render_markdown(self.notes)
_dict['type'] = self.type or u'dataset'
return _dict
def add_relationship(self, type_, related_package, comment=u''):
'''Creates a new relationship between this package and a
related_package. It leaves the caller to commit the change.
Raises KeyError if the type_ is invalid.
'''
from ckan.model import package_relationship
if type_ in package_relationship.PackageRelationship.get_forward_types():
subject = self
object_ = related_package
direction = "forward"
elif type_ in package_relationship.PackageRelationship.get_reverse_types():
type_ = package_relationship.PackageRelationship.reverse_to_forward_type(type_)
assert type_
subject = related_package
object_ = self
direction = "reverse"
else:
raise KeyError('Package relationship type: %r' % type_)
rels = self.get_relationships(with_package=related_package,
type=type_, active=False, direction=direction)
if rels:
rel = rels[0]
if comment:
rel.comment=comment
if rel.state == core.State.DELETED:
rel.undelete()
else:
rel = package_relationship.PackageRelationship(
subject=subject,
object=object_,
type=type_,
comment=comment)
meta.Session.add(rel)
return rel
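    # Illustrative sketch (dataset names are hypothetical; 'child_of'/'parent_of'
    # are assumed to be a forward/reverse type pair, as suggested by the sibling
    # example further below): child.add_relationship(u'child_of', parent) stores
    # a forward PackageRelationship(subject=child, object=parent), while
    # parent.add_relationship(u'parent_of', child) is normalised to that same
    # forward form via reverse_to_forward_type() above.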
def get_relationships(self, with_package=None, type=None, active=True,
direction='both'):
'''Returns relationships this package has.
Keeps stored type/ordering (not from pov of self).'''
assert direction in ('both', 'forward', 'reverse')
if with_package:
assert isinstance(with_package, Package)
from ckan.model.package_relationship import PackageRelationship
forward_filters = [PackageRelationship.subject==self]
reverse_filters = [PackageRelationship.object==self]
if with_package:
forward_filters.append(PackageRelationship.object==with_package)
reverse_filters.append(PackageRelationship.subject==with_package)
if active:
forward_filters.append(PackageRelationship.state==core.State.ACTIVE)
reverse_filters.append(PackageRelationship.state==core.State.ACTIVE)
if type:
forward_filters.append(PackageRelationship.type==type)
reverse_type = PackageRelationship.reverse_type(type)
reverse_filters.append(PackageRelationship.type==reverse_type)
q = meta.Session.query(PackageRelationship)
if direction == 'both':
q = q.filter(or_(
and_(*forward_filters),
and_(*reverse_filters),
))
elif direction == 'forward':
q = q.filter(and_(*forward_filters))
elif direction == 'reverse':
q = q.filter(and_(*reverse_filters))
return q.all()
def get_relationships_with(self, other_package, type=None, active=True):
return self.get_relationships(with_package=other_package,
type=type,
active=active)
def get_relationships_printable(self):
'''Returns a list of tuples describing related packages, including
non-direct relationships (such as siblings).
@return: e.g. [(annakarenina, u"is a parent"), ...]
'''
from ckan.model.package_relationship import PackageRelationship
rel_list = []
for rel in self.get_relationships():
if rel.subject == self:
type_printable = PackageRelationship.make_type_printable(rel.type)
rel_list.append((rel.object, type_printable, rel.comment))
else:
type_printable = PackageRelationship.make_type_printable(\
PackageRelationship.forward_to_reverse_type(
rel.type)
)
rel_list.append((rel.subject, type_printable, rel.comment))
# sibling types
# e.g. 'gary' is a child of 'mum', looking for 'bert' is a child of 'mum'
# i.e. for each 'child_of' type relationship ...
for rel_as_subject in self.get_relationships(direction='forward'):
if rel_as_subject.state != core.State.ACTIVE:
continue
# ... parent is the object
parent_pkg = rel_as_subject.object
# Now look for the parent's other relationships as object ...
for parent_rel_as_object in parent_pkg.get_relationships(direction='reverse'):
if parent_rel_as_object.state != core.State.ACTIVE:
continue
# and check children
child_pkg = parent_rel_as_object.subject
if (child_pkg != self and
parent_rel_as_object.type == rel_as_subject.type and
child_pkg.state == core.State.ACTIVE):
type_printable = PackageRelationship.inferred_types_printable['sibling']
rel_list.append((child_pkg, type_printable, None))
return sorted(list(set(rel_list)))
#
## Licenses are currently integrated into the domain model here.
@classmethod
def get_license_register(cls):
if not hasattr(cls, '_license_register'):
cls._license_register = _license.LicenseRegister()
return cls._license_register
@classmethod
def get_license_options(cls):
register = cls.get_license_register()
return [(l.title, l.id) for l in register.values()]
def get_license(self):
if self.license_id:
try:
license = self.get_license_register()[self.license_id]
except KeyError:
license = None
else:
license = None
return license
def set_license(self, license):
if type(license) == _license.License:
self.license_id = license.id
elif type(license) == dict:
self.license_id = license['id']
else:
msg = "Value not a license object or entity: %s" % repr(license)
raise Exception(msg)
license = property(get_license, set_license)
@property
    @maintain.deprecated('`is_private` attribute of model.Package is ' +
'deprecated and should not be used. Use `private`')
def is_private(self):
"""
DEPRECATED in 2.1
        A package is private if it belongs to any private groups
"""
return self.private
def is_in_group(self, group):
return group in self.get_groups()
def get_groups(self, group_type=None, capacity=None):
import ckan.model as model
# Gets [ (group, capacity,) ...]
        # Combine the join conditions with and_(); the Python `and` operator
        # does not compose SQLAlchemy expressions.
        groups = model.Session.query(model.Group, model.Member.capacity).\
            join(model.Member,
                 and_(model.Member.group_id == model.Group.id,
                      model.Member.table_name == 'package')).\
            join(model.Package, model.Package.id == model.Member.table_id).\
            filter(model.Member.state == 'active').\
            filter(model.Member.table_id == self.id).all()
caps = [g[1] for g in groups]
groups = [g[0] for g in groups ]
if group_type:
groups = [g for g in groups if g.type == group_type]
if capacity:
groupcaps = zip( groups,caps )
groups = [g[0] for g in groupcaps if g[1] == capacity]
return groups
@staticmethod
def get_fields(core_only=False, fields_to_ignore=None):
'''Returns a list of the properties of a package.
@param core_only - limit it to fields actually in the package table and
not those on related objects, such as tags & extras.
@param fields_to_ignore - a list of names of fields to not return if
present.
'''
# ['id', 'name', 'title', 'version', 'url', 'author', 'author_email', 'maintainer', 'maintainer_email', 'notes', 'license_id', 'state']
fields = Package.revisioned_fields()
if not core_only:
fields += ['resources', 'tags', 'groups', 'extras', 'relationships']
if fields_to_ignore:
for field in fields_to_ignore:
fields.remove(field)
return fields
def activity_stream_item(self, activity_type, user_id):
import ckan.model
import ckan.logic
assert activity_type in ("new", "changed"), (
str(activity_type))
# Handle 'deleted' objects.
# When the user marks a package as deleted this comes through here as
# a 'changed' package activity. We detect this and change it to a
# 'deleted' activity.
if activity_type == 'changed' and self.state == u'deleted':
if meta.Session.query(activity.Activity).filter_by(
object_id=self.id, activity_type='deleted').all():
# A 'deleted' activity for this object has already been emitted
# FIXME: What if the object was deleted and then activated
# again?
return None
else:
# Emit a 'deleted' activity for this object.
activity_type = 'deleted'
try:
# We save the entire rendered package dict so we can support
# viewing the past packages from the activity feed.
dictized_package = ckan.logic.get_action('package_show')({
'model': ckan.model,
'session': ckan.model.Session,
'for_view': False, # avoid ckanext-multilingual translating it
'ignore_auth': True
}, {
'id': self.id,
'include_tracking': False
})
except ckan.logic.NotFound:
# This happens if this package is being purged and therefore has no
# current revision.
# TODO: Purge all related activity stream items when a model object
# is purged.
return None
actor = meta.Session.query(ckan.model.User).get(user_id)
return activity.Activity(
user_id,
self.id,
"%s package" % activity_type,
{
'package': dictized_package,
# We keep the acting user name around so that actions can be
# properly displayed even if the user is deleted in the future.
'actor': actor.name if actor else None
}
)
def set_rating(self, user_or_ip, rating):
'''Record a user's rating of this package.
The caller function is responsible for doing the commit.
        If a rating is outside the range MIN_RATING to MAX_RATING then a
RatingValueException is raised.
@param user_or_ip - user object or an IP address string
'''
user = None
from ckan.model.user import User
from ckan.model.rating import Rating, MAX_RATING, MIN_RATING
if isinstance(user_or_ip, User):
user = user_or_ip
rating_query = meta.Session.query(Rating)\
.filter_by(package=self, user=user)
else:
ip = user_or_ip
rating_query = meta.Session.query(Rating)\
.filter_by(package=self, user_ip_address=ip)
try:
rating = float(rating)
except TypeError:
raise RatingValueException
except ValueError:
raise RatingValueException
if rating > MAX_RATING or rating < MIN_RATING:
raise RatingValueException
if rating_query.count():
rating_obj = rating_query.first()
rating_obj.rating = rating
elif user:
rating = Rating(package=self,
user=user,
rating=rating)
meta.Session.add(rating)
else:
rating = Rating(package=self,
user_ip_address=ip,
rating=rating)
meta.Session.add(rating)
@property
@maintain.deprecated()
def extras_list(self):
'''DEPRECATED in 2.9
Returns a list of the dataset's extras, as PackageExtra object
NB includes deleted ones too (state='deleted')
'''
from ckan.model.package_extra import PackageExtra
return meta.Session.query(PackageExtra) \
.filter_by(package_id=self.id) \
.all()
class PackageMember(domain_object.DomainObject):
pass
class RatingValueException(Exception):
pass
# import here to prevent circular import
from ckan.model import tag
meta.mapper(Package, package_table, properties={
# delete-orphan on cascade does NOT work!
# Why? Answer: because of way SQLAlchemy/our code works there are points
# where PackageTag object is created *and* flushed but does not yet have
# the package_id set (this cause us other problems ...). Some time later a
# second commit happens in which the package_id is correctly set.
# However after first commit PackageTag does not have Package and
# delete-orphan kicks in to remove it!
'package_tags':orm.relation(tag.PackageTag, backref='package',
cascade='all, delete', #, delete-orphan',
),
},
order_by=package_table.c.name,
extension=[extension.PluginMapperExtension()],
)
meta.mapper(tag.PackageTag, tag.package_tag_table, properties={
'pkg':orm.relation(Package, backref='package_tag_all',
cascade='none',
)
},
order_by=tag.package_tag_table.c.id,
extension=[extension.PluginMapperExtension()],
)
meta.mapper(PackageMember, package_member_table)
|
py | 1a366148859bacdbecb5a456aefaaf91be0f9b4d | # Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
from torch.distributed._sharded_tensor import (
shard_parameter,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedEmbedding(ShardedTensorTestBase):
def _run_sharded_embedding(
self,
spec,
input_size,
num_embeddings,
embedding_dim,
sharded_dim=None,
max_norm=None,
norm_type=2.0,
padding_idx=None,
):
# Use same seed.
torch.manual_seed(0)
local_embedding = torch.nn.Embedding(
num_embeddings,
embedding_dim,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
).cuda(self.rank)
sharded_embedding = torch.nn.Embedding(
num_embeddings,
embedding_dim,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
# Copy the weights from local embedding
sharded_embedding.weight = torch.nn.Parameter(
local_embedding.weight.detach().clone()
)
# Shard the parameter.
shard_parameter(sharded_embedding, "weight", spec)
# Run sharded computation
torch.manual_seed(self.rank) # inputs different on each rank
inp = torch.randint(0, num_embeddings, tuple(input_size)).cuda(self.rank)
sharded_output = sharded_embedding(inp)
# If max_norm is set, we need to ensure that the renorm has been applied across
# inputs from all ranks.
if max_norm is not None:
gathered_inputs = [torch.zeros_like(inp) for _ in range(TEST_GPU_NUM)]
dist.all_gather(gathered_inputs, inp)
unique_inp = torch.unique(torch.cat(gathered_inputs))
local_embedding(unique_inp)
# Run local computation
local_output = local_embedding(inp)
# Compare local weight and shared one to ensure the renorm
# as expected.
if max_norm is not None:
sharded_weight = sharded_embedding.weight.local_shards()[0].tensor
(start_pos, chunk_size) = generate_local_weight_sharding_params_for_test(
local_embedding.weight, sharded_dim, TEST_GPU_NUM, spec, self.rank
)
local_weight_narrowed = local_embedding.weight.narrow(
sharded_dim, start_pos, chunk_size
)
self.assertEqual(local_weight_narrowed, sharded_weight)
# Verify
self.assertEqual(local_output, sharded_output)
# Validate for torch.nn.functional.embedding version.
local_output = torch.nn.functional.embedding(
inp,
local_embedding.weight,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
sharded_output = torch.nn.functional.embedding(
inp,
sharded_embedding.weight,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
self.assertEqual(local_output, sharded_output)
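    # Illustrative note (an assumption based on how the tests below use the
    # helpers, not on the sharding API itself): specs from
    # generate_chunk_sharding_specs_for_test(1) shard the embedding weight along
    # embedding_dim (the column-wise cases), while specs for dim 0 shard along
    # num_embeddings (the row-wise cases), with the weight split across
    # TEST_GPU_NUM ranks.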
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_colwise(self):
for spec in generate_chunk_sharding_specs_for_test(1):
self._run_sharded_embedding(spec, [5, 4], 17, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13)
self._run_sharded_embedding(spec, [8, 6, 5, 4, 7], 23, 16)
self._run_sharded_embedding(spec, [4], 15, 14)
self._run_sharded_embedding(spec, [34], 15, 14, padding_idx=10)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13, padding_idx=12)
self._run_sharded_embedding(
spec, [4, 5, 6], 23, 13, max_norm=2.5, sharded_dim=1
)
self._run_sharded_embedding(
spec, [12, 7, 16], 23, 13, max_norm=2.5, sharded_dim=1
)
self._run_sharded_embedding(
spec, [8, 16, 20], 12, 12, max_norm=1.25, norm_type=1.0, sharded_dim=1
)
self._run_sharded_embedding(spec, [30], 15, 14, max_norm=2.0, sharded_dim=1)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_rowwise(self):
for spec in generate_chunk_sharding_specs_for_test(0):
# Test even split.
self._run_sharded_embedding(spec, [5, 12], 16, 22)
self._run_sharded_embedding(spec, [5, 4], 32, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11)
self._run_sharded_embedding(
spec, [5, 12], 16, 22, max_norm=2.5, sharded_dim=0
)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11, padding_idx=30)
self._run_sharded_embedding(
spec, [6, 5, 3], 26, 11, max_norm=2.0, sharded_dim=0
)
# Test uneven split.
self._run_sharded_embedding(spec, [8, 6, 5, 4], 19, 11)
self._run_sharded_embedding(spec, [6, 7, 6], 21, 11)
self._run_sharded_embedding(spec, [4], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 21, 11, padding_idx=10)
self._run_sharded_embedding(
spec, [12, 16, 8], 27, 11, max_norm=2.0, sharded_dim=0
)
self._run_sharded_embedding(spec, [4], 14, 11, max_norm=2.5, sharded_dim=0)
if __name__ == "__main__":
run_tests()
|
py | 1a3661b38b573b2ca5dfa6ee7149d3179f6ea381 | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
# Copyright 2013 Alexey Kardapoltsev
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from deployutils import *
import copy
import time
import re
import sys
verbose = False
remoteExec = False
remoteHost = None
log_file = "/tmp/{}-deploy.log".format(COMPANY_NAME)
log = None
def shell(args):
_call(args.cmd.split())
def copy_scripts(args):
_call(["scp", "deployutils.py", "deploy-target.py", "{}:".format(args.target)])
def publish(args):
modules = _extract_modules(args)
stage = args.env
if args.target:
stage = stages[args.target]
_log("will publish {} modules to stage {}".format(modules, stage))
if args.clean:
_clean()
for m in modules:
_publish(m, stage)
if not args.no_docs:
_publish_docs(stage)
def publish_docs(args):
stage = args.env
if args.target:
stage = stages[args.target]
_log("will publish docs to stage {}".format(stage))
if args.clean:
_clean()
_call(["sbt", "compile"])
_publish_docs(stage)
def install(args):
modules = _extract_modules(args)
_log("installing {}".format(modules))
if args.env == "prod":
if not confirm("Are u really wanna install to prod?"):
_log("Good buy!")
sys.exit(0)
if args.target:
if args.update:
_update_target(args.target, args.full_update)
_log("will install {} to {}".format(modules, args.target))
_install(args.target, modules)
else:
env = environments[args.env]
for server in env:
seeds = []
if is_seed(server):
seeds = list(groups["seed"])
t_modules = set.intersection(modules, server["modules"] + seeds)
if t_modules:
if args.update:
_update_target(server["host"], args.full_update)
_log("will install {} to {}".format(t_modules, server["host"]))
_install(server["host"], t_modules)
def chick(args):
publish(copy.deepcopy(args))
args.update = True
install(args)
def restart_cluster(args):
env = environments[args.env]
# stop non seed modules
for server in env:
_check_version(server["host"])
modules = [m for m in server["modules"] if not m in groups["seed"]]
if modules:
_log("will stop {} at {}".format(" ".join(modules), server["host"]))
_call(["ssh", "{}".format(server["host"]), "sudo ~/deploy-target.py restart -a stop -m {}".format(" ".join(modules))])
# stop seed modules
for server in env:
_check_version(server["host"])
modules = [m for m in server["modules"] if m in groups["seed"]]
if modules:
_log("will stop {} at {}".format(" ".join(modules), server["host"]))
_call(["ssh", "{}".format(server["host"]), "sudo ~/deploy-target.py restart -a stop -m {}".format(" ".join(modules))])
# start seed
for server in env:
if is_seed(server):
_log("starting seed on {}".format(server["host"]))
for s in groups["seed"]:
_call(["ssh", "{}".format(server["host"]), "sudo ~/deploy-target.py restart -a start -m {}".format(s)])
# wait for seed start up
time.sleep(3)
# start all other modules
for server in env:
modules = list(server["modules"])
_log("starting {} on {}".format(" ".join(modules), server["host"]))
_call(["ssh", "{}".format(server["host"]), "sudo ~/deploy-target.py restart -a start -m {}".format(" ".join(modules))])
def restart_module(args):
modules = _extract_modules(args)
if not modules:
_log("Please specify at least one module or group")
_check_version(args.target)
for m in modules:
_call(["ssh", "{}".format(args.target), "sudo ~/deploy-target.py restart -a {} -m {}".format(args.action, m)])
def start(args):
if args.clean:
_clean()
modules = list(_extract_modules(args))
if not len(modules) == 1:
_log("Exact one module name expected")
sys.exit(1)
_start(modules[0], args.hostType, args.hostname)
def print_log(args):
with open(log_file, 'r') as fin:
print(fin.read())
def _check_version(target):
cmd = ["ssh", target, "~/deploy-target.py version"]
std = subprocess.check_output(cmd).decode("utf-8")
t_version = int(std)
if t_version < SCRIPT_VERSION:
_log("old version of script at {}, updating...".format(target))
_call(["scp", "deployutils.py", "deploy-target.py", "{}:".format(target)])
elif t_version > SCRIPT_VERSION:
_log("target version is newer than local script")
exit(1)
def _start(module, hostType, hostname):
_log("starting module {} with hostType {} on {}".format(module, hostType, hostname))
module_name = module[8:]
_call(["sbt", "'project {}'".format(module), "'runMain some.main.Class -t {} -h {}'".format(module_name, hostType, hostname)])
def _extract_modules(args):
modules = set()
if hasattr(args, "modules"):
for m in args.modules:
modules.add(m)
if hasattr(args, "groups"):
for g in args.groups:
for m in groups[g]:
modules.add(m)
return modules
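# Illustrative sketch (module and group names are hypothetical, and `groups`
# comes from deployutils): with groups = {"seed": ["backend-seed"]}, parsed args
# carrying modules=["backend-api"] and groups=["seed"] would make
# _extract_modules() return {"backend-api", "backend-seed"}.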
def _restart(host, modules, action):
_check_version(host)
_call(["ssh", "{}".format(host),
"sudo ~/deploy-target.py restart -a {} -m {}".format(action, " ".join(modules))])
def _install(host, modules):
if(modules):
_check_version(host)
_log("installing modules {} to {}".format(" ".join(modules), host))
_call(["ssh", "{}".format(host), "sudo ~/deploy-target.py install -m {}".format(" ".join(modules))])
def _update_target(host, is_full):
_check_version(host)
if is_full:
_call(["ssh", "{}".format(host), "sudo ~/deploy-target.py update --full"])
else:
_call(["ssh", "{}".format(host), "sudo ~/deploy-target.py update"])
def _clean():
_log("cleaning...")
_call(["sbt", "clean"])
_call(["sbt", "update"])
def _publish(module, stage):
_log("publishing module {}".format(module))
_call(["sbt", "project {}".format(module), "set debRepoStage := \"{}\"".format(stage), "publishDebs"])
_base_docs_url = "http://doc.{}/docs/{}/"
_doc_user=""
_doc_password=""
def _publish_docs(stage):
_log("publishing docs to {}".format(stage))
try:
for schema in ["v1.api.json"]:
url = _base_docs_url.format(DOMAIN, stage) + schema
            latest_schema = re.sub(r"v[\d]+", "latest", schema)
latest_url = _base_docs_url.format(DOMAIN, stage) + latest_schema
schemaPath = "schema/schemas/generated/{}".format(schema)
_call(["curl", "--user", "{}:{}".format(_doc_user, _doc_password), "-T", schemaPath, url])
_call(["curl", "--user", "{}:{}".format(_doc_user, _doc_password), "-T", schemaPath, latest_url])
#_call(["asciidoctor", "-o", "api.html", "api.ad"])
#_call(["curl", "--user", "{}:{}".format(_doc_user, _doc_password), "-T", "api.html", _base_docs_url.format(stage)])
_call(["curl", "--user", "{}:{}".format(_doc_user, _doc_password), "-T", "api_changes.md", _base_docs_url.format(stage)])
except Exception as e:
_log("ERROR: {}".format(e))
_log("docs was not published!")
pass
def _call(cmd):
_log("will execute {}".format(cmd))
exit_code = subprocess.call(cmd, stdout=log, stderr=log)
if exit_code != 0:
raise Exception("Failed to execute cmd: {}".format(cmd))
def _log(msg):
print(msg)
if log:
m = msg
if not m.endswith("\n"):
m = m + "\n"
m = time.strftime('%X %x') + ": " + m
log.write(m)
def _sync_sources():
sync_cmd = ['rsync', '--delete', '--exclude=.**', '--exclude=target', '--exclude=logs', '--exclude=__pycache__', '-avzh', '.', "{}:{}".format(remoteHost, REPO_NAME)]
exit_code = subprocess.call(sync_cmd, stdout=log, stderr=log)
topParser = argparse.ArgumentParser()
topParser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help = "do not redirect output to /dev/null")
topParser.add_argument("-r", "--remote", dest="remote", choices=["build00"], help = "execute all commands at the remote host")
subParsers = topParser.add_subparsers(title = "Command categories")
cleanParser = argparse.ArgumentParser(add_help = False)
cleanParser.add_argument("-c", "--clean", dest="clean", action="store_true", help = "run `sbt clean` before building")
noDocsParser = argparse.ArgumentParser(add_help = False)
noDocsParser.add_argument("--no-docs", dest="no_docs", action="store_true", help = "skip docs publishing")
updateParser = argparse.ArgumentParser(add_help = False)
updateParser.add_argument("--no-update", dest="update", action="store_false", help = "do not run apt-get update before installing")
updateParser.add_argument("--full-update", dest="full_update", default=False, action="store_true", help = "run apt-get update from all sources before installing")
startParser = subParsers.add_parser("start", description = "start backend module on local machine", parents = [cleanParser, modulesParser])
startParser.add_argument("-t", "--hosttype", dest="hostType", default="local", help = "backend host type", choices=["local"])
startParser.add_argument("-d", "--domain", dest="hostname", default="localhost", help = "akka hostname conf")
startParser.set_defaults(func = start)
shellParser = subParsers.add_parser("shell", description = "run shell command")
shellParser.add_argument("cmd")
shellParser.set_defaults(func = shell)
installParser = subParsers.add_parser("install", description = "installing backend modules to host",
parents = [modulesParser, groupsParser, hostParser, updateParser])
installParser.add_argument("-r", "--restart", dest="restart", action="store_true", help = "restart service after installation")
installParser.set_defaults(func = install)
publishParser = subParsers.add_parser("publish", description = "publishing deb to nexus repo", parents = [modulesParser, hostParser, groupsParser, cleanParser, noDocsParser])
publishParser.set_defaults(func = publish)
chickParser = subParsers.add_parser("chick", description = "hubot chick dev",
parents = [modulesParser, groupsParser, hostParser, cleanParser, updateParser, noDocsParser])
chickParser.set_defaults(func = chick)
deployParser = subParsers.add_parser("deploy", description = "deploy helper scripts to target", parents = [hostParser])
deployParser.set_defaults(func = copy_scripts)
deployDocsParser = subParsers.add_parser("publishdocs", description = "publish docs and api scheme", parents = [hostParser, cleanParser])
deployDocsParser.set_defaults(func = publish_docs)
restartParser = subParsers.add_parser("restart", description = "restart backend module", parents = [hostParser, modulesParser, groupsParser, actionParser])
restartParser.set_defaults(func = restart_module)
restartClusterParser = subParsers.add_parser("restartcluster", description = "start, stop backend", parents = [hostParser])
restartClusterParser.set_defaults(func = restart_cluster)
logParser = subParsers.add_parser("log", description = "print last deploy log to stdout")
logParser.set_defaults(func = print_log)
logParser.set_defaults(verbose = True) # in non verbose mode logs will be cleaned up at the beginning
try:
import argcomplete
argcomplete.autocomplete(topParser)
except ImportError:
print("Try install python argcomplete :)")
pass
parsed = topParser.parse_args()
start = time.time()
try:
if parsed.verbose:
verbose = True
else:
open(log_file, 'w').close() #clean up log file
log = open(log_file, 'a')
verbose = False
if parsed.remote:
remoteExec = True
remoteHost = parsed.remote
_sync_sources()
cmd = []
for a in sys.argv:
if a != "-r" and a != remoteHost:
cmd.append(a)
cmd = ["'" + arg + "'" for arg in cmd]
cmd = ["cd", REPO_NAME, ";"] + cmd
c = ' '.join(cmd)
cmd = ["ssh", remoteHost, c]
_call(cmd)
else:
parsed.func(parsed)
except Exception as e:
_log("ERROR: {}".format(e))
end = time.time()
_log("total time: {:.0f} sec".format(end - start))
sys.exit(1)
end = time.time()
_log("total time: {:.0f} sec".format(end - start))
# vim: set tabstop=8 expandtab shiftwidth=4 softtabstop=4:
|
py | 1a3662604a7592309feef0504ebcf82e62ab4b38 | import asyncio
import json
import os
import random
import unittest
from datetime import datetime, timedelta
import boto3
import pytest
import redislite
from mock import MagicMock, Mock, patch
from mockredis import mock_strict_redis_client
from moto import (
mock_config,
mock_dynamodb2,
mock_iam,
mock_s3,
mock_ses,
mock_sns,
mock_sqs,
mock_sts,
)
from tornado.concurrent import Future
# This must be set before loading ConsoleMe's configuration
os.environ["CONFIG_LOCATION"] = "example_config/example_config_test.yaml"
MOCK_ROLE = {
"arn": "arn:aws:iam::123456789012:role/FakeRole",
"name": "FakeRole",
"accountId": "123456789012",
"ttl": 1557325374,
"policy": {
"Path": "/",
"RoleId": "ABCDEFG",
"Arn": "arn:aws:iam::123456789012:role/FakeRole",
"CreateDate": "2019-01-15T22:55:53Z",
"AssumeRolePolicyDocument": {
"Version": "2008-10-17",
"Statement": [
{
"Sid": "2",
"Effect": "Allow",
"Principal": {"AWS": "arn:aws:iam::123456789012:role/FakeRole"},
"Action": "sts:AssumeRole",
},
{
"Sid": "1",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::123456789012:role/ConsoleMeInstanceProfile"
},
"Action": "sts:AssumeRole",
},
],
},
"Tags": [],
"AttachedManagedPolicies": [
{
"PolicyName": "test1-Example.com",
"PolicyArn": "arn:aws:iam::123456789012:policy/testPolicy",
}
],
"InstanceProfileList": [],
"RolePolicyList": [
{
"PolicyName": "iam",
"PolicyDocument": {
"Statement": [
{
"Action": [
"iam:GetAccountAuthorizationDetails",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:ListInstanceProfiles",
"iam:ListInstanceProfilesForRole",
"iam:ListRolePolicies",
"iam:ListRoles",
"iam:ListAttachedRolePolicies",
"iam:ListRoleTags",
"s3:listallmybuckets",
"sqs:ListQueues",
"sqs:getqueueattributes",
"sns:ListTopics",
],
"Effect": "Allow",
"Resource": ["*"],
"Sid": "iam",
}
],
"Version": "2012-10-17",
},
}
],
},
"templated": "fake/file.json",
}
MOCK_REDIS_DB_PATH = "/tmp/consoleme_unit_test.rdb"
if os.path.exists(MOCK_REDIS_DB_PATH):
os.remove(MOCK_REDIS_DB_PATH)
if os.path.exists(f"{MOCK_REDIS_DB_PATH}.settings"):
os.remove(f"{MOCK_REDIS_DB_PATH}.settings")
all_roles = None
class AioTestCase(unittest.TestCase):
# noinspection PyPep8Naming
def __init__(self, methodName="runTest", loop=None):
self.loop = loop or asyncio.get_event_loop()
self._function_cache = {}
super(AioTestCase, self).__init__(methodName=methodName)
def coroutine_function_decorator(self, func):
def wrapper(*args, **kw):
return self.loop.run_until_complete(func(*args, **kw))
return wrapper
def __getattribute__(self, item):
attr = object.__getattribute__(self, item)
if asyncio.iscoroutinefunction(attr):
if item not in self._function_cache:
self._function_cache[item] = self.coroutine_function_decorator(attr)
return self._function_cache[item]
return attr
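# Illustrative sketch (not one of ConsoleMe's real fixtures): because
# AioTestCase wraps coroutine attributes in loop.run_until_complete via
# __getattribute__, test methods can be written as plain ``async def``s.
# The class and assertion below are hypothetical examples.
class _ExampleAioTestCase(AioTestCase):
    async def test_awaited_value(self):
        value = await asyncio.sleep(0, result=42)
        self.assertEqual(value, 42)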
class MockBaseHandler:
async def authorization_flow(
self, user=None, console_only=True, refresh_cache=False
):
self.user = "[email protected]"
self.ip = "1.2.3.4"
self.groups = ["group1", "group2"]
self.contractor = False
self.red = mock_strict_redis_client()
class MockBaseMtlsHandler:
async def authorization_flow_user(self):
self.request_uuid = 1234
self.ip = "1.2.3.4"
self.requester = {"type": "user"}
async def authorization_flow_app(self):
self.request_uuid = 1234
self.ip = "1.2.3.4"
self.requester = {"type": "application", "name": "fakeapp"}
class MockAuth:
def __init__(
self, restricted=False, compliance_restricted=False, get_groups_val=None
):
if get_groups_val is None:
get_groups_val = []
self.restricted = restricted
self.compliance_restricted = compliance_restricted
self.get_groups_val = get_groups_val
async def get_groups(self, *kvargs):
return self.get_groups_val
class MockRedis:
def __init__(self, return_value=None):
self.return_value = return_value
def get(self, tag):
print(f"MockRedis GET called with argument {tag}")
return self.return_value
def setex(self, *args):
print(f"MockRedis SETEX called with args {args}")
def hgetall(self, *args):
print(f"MockRedis HGETALL called with args {args}")
return self.return_value
class MockRedisHandler:
def __init__(self, return_value=None):
self.return_value = return_value
async def redis(self):
redis_client = MockRedis(return_value=self.return_value)
return redis_client
mock_accountdata_redis = MagicMock(
return_value=MockRedisHandler(
return_value=json.dumps(
{"123456789012": ["awsaccount", "[email protected]"]}
)
)
)
class AWSHelper:
async def random_account_id(self):
return str(random.randrange(100000000000, 999999999999))
@pytest.fixture(scope="session")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
@pytest.fixture(autouse=True, scope="session")
def sts(aws_credentials):
"""Mocked STS Fixture."""
with mock_sts():
yield boto3.client("sts", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def iam(aws_credentials):
"""Mocked IAM Fixture."""
with mock_iam():
yield boto3.client("iam", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def aws_config(aws_credentials):
"""Mocked Config Fixture."""
with mock_config():
yield boto3.client("config", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def s3(aws_credentials):
"""Mocked S3 Fixture."""
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def ses(aws_credentials):
"""Mocked SES Fixture."""
with mock_ses():
client = boto3.client("ses", region_name="us-east-1")
client.verify_email_address(EmailAddress="[email protected]")
yield client
@pytest.fixture(autouse=True, scope="session")
def sqs(aws_credentials):
"""Mocked SQS Fixture."""
with mock_sqs():
yield boto3.client("sqs", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def sns(aws_credentials):
"""Mocked S3 Fixture."""
with mock_sns():
yield boto3.client("sns", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def create_default_resources(s3, iam, redis, iam_sync_roles, iamrole_table):
from asgiref.sync import async_to_sync
from consoleme.config import config
from consoleme.lib.cache import store_json_results_in_redis_and_s3
global all_roles
buckets = [config.get("cache_roles_across_accounts.all_roles_combined.s3.bucket")]
for bucket in buckets:
s3.create_bucket(Bucket=bucket)
if all_roles:
async_to_sync(store_json_results_in_redis_and_s3)(
all_roles,
s3_bucket=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.bucket"
),
s3_key=config.get("cache_roles_across_accounts.all_roles_combined.s3.file"),
)
return
from consoleme.celery.celery_tasks import cache_roles_for_account
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.redis import RedisHandler
red = RedisHandler().redis_sync()
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
for account_id in accounts_d.keys():
cache_roles_for_account(account_id)
cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
all_roles = red.hgetall(cache_key)
async_to_sync(store_json_results_in_redis_and_s3)(
all_roles,
s3_bucket=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.bucket"
),
s3_key=config.get("cache_roles_across_accounts.all_roles_combined.s3.file"),
)
@pytest.fixture(autouse=True, scope="session")
def dynamodb(aws_credentials):
"""Mocked DynamoDB Fixture."""
with mock_dynamodb2():
# Remove the config value for the DynamoDB Server
from consoleme.config.config import CONFIG
old_value = CONFIG.config.pop("dynamodb_server", None)
yield boto3.client("dynamodb", region_name="us-east-1")
# Reset the config value:
CONFIG.config["dynamodb_server"] = old_value
@pytest.fixture(autouse=True, scope="session")
def retry():
"""Mock the retry library so that it doesn't retry."""
class MockRetry:
def __init__(self, *args, **kwargs):
pass
def call(self, f, *args, **kwargs):
return f(*args, **kwargs)
patch_retry = patch("retrying.Retrying", MockRetry)
yield patch_retry.start()
patch_retry.stop()
@pytest.fixture(autouse=True, scope="session")
def iamrole_table(dynamodb):
# Create the table:
dynamodb.create_table(
TableName="consoleme_iamroles_global",
AttributeDefinitions=[
{"AttributeName": "arn", "AttributeType": "S"},
{"AttributeName": "accountId", "AttributeType": "S"},
],
KeySchema=[
{"AttributeName": "arn", "KeyType": "HASH"},
{"AttributeName": "accountId", "KeyType": "RANGE"},
],
ProvisionedThroughput={"ReadCapacityUnits": 1000, "WriteCapacityUnits": 1000},
)
# Apply a TTL:
dynamodb.update_time_to_live(
TableName="consoleme_iamroles_global",
TimeToLiveSpecification={"Enabled": True, "AttributeName": "ttl"},
)
yield dynamodb
@pytest.fixture(autouse=True, scope="session")
def policy_requests_table(dynamodb):
# Create the table:
dynamodb.create_table(
TableName="consoleme_policy_requests",
KeySchema=[{"AttributeName": "request_id", "KeyType": "HASH"}], # Partition key
AttributeDefinitions=[
{"AttributeName": "request_id", "AttributeType": "S"},
{"AttributeName": "arn", "AttributeType": "S"},
],
GlobalSecondaryIndexes=[
{
"IndexName": "arn-request_id-index",
"KeySchema": [{"AttributeName": "arn", "KeyType": "HASH"}],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {
"ReadCapacityUnits": 123,
"WriteCapacityUnits": 123,
},
}
],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
yield dynamodb
@pytest.fixture(autouse=True, scope="session")
def requests_table(dynamodb):
# Create the table:
dynamodb.create_table(
TableName="consoleme_requests_global",
AttributeDefinitions=[{"AttributeName": "request_id", "AttributeType": "S"}],
KeySchema=[{"AttributeName": "request_id", "KeyType": "HASH"}],
ProvisionedThroughput={"ReadCapacityUnits": 1000, "WriteCapacityUnits": 1000},
)
yield dynamodb
@pytest.fixture(autouse=True, scope="session")
def users_table(dynamodb):
# Create the table:
dynamodb.create_table(
TableName="consoleme_users_global",
AttributeDefinitions=[{"AttributeName": "username", "AttributeType": "S"}],
KeySchema=[{"AttributeName": "username", "KeyType": "HASH"}],
ProvisionedThroughput={"ReadCapacityUnits": 1000, "WriteCapacityUnits": 1000},
)
yield dynamodb
@pytest.fixture(autouse=True, scope="session")
def dummy_requests_data(requests_table):
user = {
"request_id": {"S": "abc-def-ghi"},
"aws:rep:deleting": {"BOOL": False},
"aws:rep:updateregion": {"S": "us-west-2"},
"aws:rep:updatetime": {"N": "1547848006"},
"group": {"S": "test_group"},
"justification": {"S": "some reason"},
"last_updated": {"N": "1245678901"},
"request_time": {"N": "1234567890"},
"status": {"S": "pending"},
"updated_by": {"S": "[email protected]"},
"username": {"S": "[email protected]"},
"reviewer_commnets": {"S": "All the access!"},
}
from consoleme.lib.dynamo import BaseDynamoHandler
requests_table.put_item(
TableName="consoleme_requests_global",
Item=BaseDynamoHandler()._data_to_dynamo_replace(user),
)
yield requests_table
@pytest.fixture(autouse=True, scope="session")
def dummy_users_data(users_table):
user = {
"username": {"S": "[email protected]"},
"aws:rep:deleting": {"BOOL": False},
"aws:rep:updateregion": {"S": "us-west-2"},
"last_udpated": {"N": "1547848006"},
"requests": {"L": [{"S": "abc-def-ghi"}]},
}
from consoleme.lib.dynamo import BaseDynamoHandler
users_table.put_item(
TableName="consoleme_users_global",
Item=BaseDynamoHandler()._data_to_dynamo_replace(user),
)
yield users_table
@pytest.fixture(autouse=True, scope="session")
def iam_sync_roles(iam):
statement_policy = json.dumps(
{
"Statement": [{"Effect": "Deny", "Action": "*", "Resource": "*"}],
"Version": "2012-10-17",
}
)
assume_role_policy = json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::123456789012:role/ConsoleMeInstanceProfile"
},
"Action": "sts:AssumeRole",
}
],
}
)
# Create the role that CloudAux will assume:
iam.create_role(RoleName="ConsoleMe", AssumeRolePolicyDocument=assume_role_policy)
# Create a generic test instance profile
iam.create_role(
RoleName="TestInstanceProfile", AssumeRolePolicyDocument=assume_role_policy
)
# Create a managed policy:
policy_one = iam.create_policy(
PolicyName="policy-one", PolicyDocument=statement_policy
)["Policy"]["Arn"]
policy_two = iam.create_policy(
PolicyName="policy-two", PolicyDocument=statement_policy
)["Policy"]["Arn"]
    # Create 10 IAM roles for syncing:
for x in range(0, 10):
iam.create_role(
RoleName=f"RoleNumber{x}", AssumeRolePolicyDocument=assume_role_policy
)
iam.put_role_policy(
RoleName=f"RoleNumber{x}",
PolicyName="SomePolicy",
PolicyDocument=statement_policy,
)
iam.tag_role(
RoleName=f"RoleNumber{x}",
Tags=[
{"Key": "Number", "Value": f"{x}"},
{"Key": "authorized_groups", "Value": f"group{x}:group{x}@example.com"},
{
"Key": "authorized_groups_cli_only",
"Value": f"group{x}-cli:group{x}[email protected]",
},
],
)
iam.attach_role_policy(RoleName=f"RoleNumber{x}", PolicyArn=policy_one)
iam.attach_role_policy(RoleName=f"RoleNumber{x}", PolicyArn=policy_two)
# Create the dynamic user role:
iam.create_role(
RoleName="awsaccount_user", AssumeRolePolicyDocument=assume_role_policy
)
iam.put_role_policy(
RoleName="awsaccount_user",
PolicyName="SomePolicy",
PolicyDocument=statement_policy,
)
iam.attach_role_policy(RoleName="awsaccount_user", PolicyArn=policy_one)
# Create another dynamic user role
iam.create_role(
RoleName="cm_someuser_N", AssumeRolePolicyDocument=assume_role_policy
)
iam.put_role_policy(
RoleName="cm_someuser_N",
PolicyName="SomePolicy",
PolicyDocument=statement_policy,
)
iam.attach_role_policy(RoleName="cm_someuser_N", PolicyArn=policy_one)
iam.create_role(RoleName="rolename", AssumeRolePolicyDocument=assume_role_policy)
iam.attach_role_policy(RoleName="rolename", PolicyArn=policy_one)
yield iam
@pytest.fixture(autouse=True, scope="session")
def www_user():
return json.loads(
"""{
"Path": "/",
"RoleName": "rolename",
"RoleId": "AROAI5FHPGAEE6FRM5Q2Y",
"Arn": "arn:aws:iam::123456789012:role/rolename",
"CreateDate": "2017-10-06T22:07:23Z",
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::123456789012:saml-provider/saml"
},
"Action": "sts:AssumeRoleWithSAML",
"Condition": {
"StringEquals": {
"SAML:aud": "https://signin.aws.amazon.com/saml"
}
}
},
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::123456789012:role/consoleme"
},
"Action": "sts:AssumeRole"
}
]
},
"InstanceProfileList": [],
"RolePolicyList": [
{
"PolicyName": "user",
"PolicyDocument": {
"Statement": [
{
"Action": [
"ec2:Describe*",
"lambda:Describe*",
"sns:List*",
"sqs:List*"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:List*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}
}
],
"AttachedManagedPolicies": [
{
"PolicyName": "Abc",
"PolicyArn": "arn:aws:iam::123456789012:policy/Abc"
},
{
"PolicyName": "Encrypt",
"PolicyArn": "arn:aws:iam::123456789012:policy/Encrypt"
},
{
"PolicyName": "ReadOnlyAccess",
"PolicyArn": "arn:aws:iam::aws:policy/ReadOnlyAccess"
},
{
"PolicyName": "Tag",
"PolicyArn": "arn:aws:iam::123456789012:policy/Tag"
}
],
"Tags": []
}"""
)
class FakeRedis(redislite.StrictRedis):
def __init__(self, *args, **kwargs):
if kwargs.get("connection_pool"):
del kwargs["connection_pool"]
super(FakeRedis, self).__init__(
MOCK_REDIS_DB_PATH, *args, **kwargs, decode_responses=True
)
@pytest.fixture(autouse=True, scope="session")
def redis(session_mocker):
session_mocker.patch("redis.Redis", FakeRedis)
session_mocker.patch("redis.StrictRedis", FakeRedis)
session_mocker.patch("consoleme.lib.redis.redis.StrictRedis", FakeRedis)
session_mocker.patch("consoleme.lib.redis.redis.Redis", FakeRedis)
session_mocker.patch(
"consoleme.lib.redis.RedisHandler.redis_sync", return_value=FakeRedis()
)
session_mocker.patch(
"consoleme.lib.redis.RedisHandler.redis", return_value=FakeRedis()
)
return True
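# Sketch of what the patched session provides (assumption: the ``redis``
# fixture above is active). Application code such as
#
#     from consoleme.lib.redis import RedisHandler
#     red = RedisHandler().redis_sync()
#     red.set("example_key", "example_value")
#
# ends up talking to a redislite instance persisted at MOCK_REDIS_DB_PATH,
# with decode_responses=True, instead of a real Redis server.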
class MockParliament:
def __init__(self, return_value=None):
self.return_value = return_value
@property
def findings(self):
return self.return_value
class Finding:
issue = ""
detail = ""
location = {}
severity = ""
title = ""
description = ""
def __init__(
self,
issue,
detail,
location,
severity,
title,
description,
):
self.issue = issue
self.detail = detail
self.location = location
self.severity = severity
self.title = title
self.description = description
@pytest.fixture(scope="session")
def parliament(session_mocker):
session_mocker.patch(
"parliament.analyze_policy_string",
return_value=MockParliament(
return_value=[
{
"issue": "RESOURCE_MISMATCH",
"title": "No resources match for the given action",
"severity": "MEDIUM",
"description": "",
"detail": [
{"action": "s3:GetObject", "required_format": "arn:*:s3:::*/*"}
],
"location": {"line": 3, "column": 18, "filepath": "test.json"},
}
]
),
)
session_mocker.patch(
"parliament.enhance_finding",
return_value=Finding(
issue="RESOURCE_MISMATCH",
title="No resources match for the given action",
severity="MEDIUM",
description="",
detail="",
location={},
),
)
@pytest.fixture(scope="session")
def user_iam_role(iamrole_table, www_user):
from consoleme.lib.dynamo import IAMRoleDynamoHandler
ddb = IAMRoleDynamoHandler()
role_entry = {
"arn": www_user.pop("Arn"),
"name": www_user.pop("RoleName"),
"accountId": "123456789012",
"ttl": int((datetime.utcnow() + timedelta(hours=36)).timestamp()),
"policy": ddb.convert_role_to_json(www_user),
}
ddb.sync_iam_role_for_account(role_entry)
@pytest.fixture(autouse=True, scope="session")
def mock_exception_stats():
p = patch("consoleme.exceptions.exceptions.get_plugin_by_name")
yield p.start()
p.stop()
@pytest.fixture(autouse=True, scope="session")
def mock_celery_stats(mock_exception_stats):
p = patch("consoleme.celery.celery_tasks.stats")
yield p.start()
p.stop()
@pytest.fixture(scope="session")
def mock_async_http_client():
p_return_value = Mock()
p_return_value.body = "{}"
p = patch("tornado.httpclient.AsyncHTTPClient")
p.return_value.fetch.return_value = create_future(p_return_value)
yield p.start()
p.stop()
@pytest.fixture(autouse=True, scope="session")
def populate_caches(
redis,
user_iam_role,
iam_sync_roles,
dummy_users_data,
dummy_requests_data,
policy_requests_table,
iamrole_table,
create_default_resources,
s3,
sns,
sqs,
iam,
www_user,
parliament,
):
from asgiref.sync import async_to_sync
from consoleme.celery import celery_tasks as celery
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme_default_plugins.plugins.celery_tasks import (
celery_tasks as default_celery_tasks,
)
celery.cache_cloud_account_mapping()
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
default_celery_tasks.cache_application_information()
for account_id in accounts_d.keys():
celery.cache_roles_for_account(account_id)
celery.cache_s3_buckets_for_account(account_id)
celery.cache_sns_topics_for_account(account_id)
celery.cache_sqs_queues_for_account(account_id)
celery.cache_managed_policies_for_account(account_id)
# celery.cache_resources_from_aws_config_for_account(account_id) # No select_resource_config in moto yet
celery.cache_policies_table_details()
celery.cache_policy_requests()
celery.cache_credential_authorization_mapping()
class MockAioHttpResponse:
status = 200
responses = []
@classmethod
async def json(cls):
try:
return cls.responses.pop(0)
except Exception: # noqa
return []
class MockAioHttpRequest:
@classmethod
async def get(cls, *args, **kwargs):
return MockAioHttpResponse()
@classmethod
async def post(cls, *args, **kwargs):
return MockAioHttpResponse()
def create_future(ret_val=None):
future = Future()
future.set_result(ret_val)
return future
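# Illustrative usage sketch (hypothetical, not used by the fixtures above):
# tornado callers ``await`` AsyncHTTPClient.fetch, so the mock hands back an
# already-resolved Future built with create_future, e.g.:
#
#     fake_response = Mock()
#     fake_response.body = '{"ok": true}'
#     assert create_future(fake_response).result() is fake_response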
|
py | 1a36632291132eec235a3405d9ca930ff5438903 | import unittest
import cupy
from cupy import testing
class TestCArray(unittest.TestCase):
def test_size(self):
x = cupy.arange(3).astype('i')
y = cupy.ElementwiseKernel(
'raw int32 x', 'int32 y', 'y = x.size()', 'test_carray_size',
)(x, size=1)
self.assertEqual(int(y[0]), 3)
def test_shape(self):
x = cupy.arange(6).reshape((2, 3)).astype('i')
y = cupy.ElementwiseKernel(
'raw int32 x', 'int32 y', 'y = x.shape()[i]', 'test_carray_shape',
)(x, size=2)
testing.assert_array_equal(y, (2, 3))
def test_strides(self):
x = cupy.arange(6).reshape((2, 3)).astype('i')
y = cupy.ElementwiseKernel(
'raw int32 x', 'int32 y', 'y = x.strides()[i]',
'test_carray_strides',
)(x, size=2)
testing.assert_array_equal(y, (12, 4))
def test_getitem_int(self):
x = cupy.arange(24).reshape((2, 3, 4)).astype('i')
y = cupy.empty_like(x)
y = cupy.ElementwiseKernel(
'raw T x', 'int32 y', 'y = x[i]', 'test_carray_getitem_int',
)(x, y)
testing.assert_array_equal(y, x)
def test_getitem_idx(self):
x = cupy.arange(24).reshape((2, 3, 4)).astype('i')
y = cupy.empty_like(x)
y = cupy.ElementwiseKernel(
'raw T x', 'int32 y',
'ptrdiff_t idx[] = {i / 12, i / 4 % 3, i % 4}; y = x[idx]',
'test_carray_getitem_idx',
)(x, y)
testing.assert_array_equal(y, x)
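# Minimal standalone sketch of the raw-argument pattern exercised above
# (assumes a CUDA device is available; the kernel name is illustrative):
if __name__ == '__main__':
    x = cupy.arange(6).reshape((2, 3)).astype('i')
    y = cupy.ElementwiseKernel(
        'raw int32 x', 'int32 y', 'y = x.size()', 'example_carray_size',
    )(x, size=1)
    print(int(y[0]))  # prints 6, the total number of elements in x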
|
py | 1a3663b72d4bf91acb5be54bfdbd4f4d5c8c99d6 | from functools import partial
from typing import (
AsyncIterator,
Callable,
Type,
)
from async_generator import asynccontextmanager
from async_service import background_asyncio_service
from p2p.abc import ConnectionAPI
from .abc import ExchangeAPI, NormalizerAPI, ValidatorAPI
from .candidate_stream import ResponseCandidateStream
from .manager import ExchangeManager
from .typing import TResult, TRequestCommand, TResponseCommand
class BaseExchange(ExchangeAPI[TRequestCommand, TResponseCommand, TResult]):
_request_command_type: Type[TRequestCommand]
_response_command_type: Type[TResponseCommand]
_manager: ExchangeManager[TRequestCommand, TResponseCommand, TResult]
def __init__(self) -> None:
self.tracker = self.tracker_class()
@asynccontextmanager
async def run_exchange(self, connection: ConnectionAPI) -> AsyncIterator[None]:
protocol = connection.get_protocol_for_command_type(self.get_request_cmd_type())
response_stream: ResponseCandidateStream[TRequestCommand, TResponseCommand] = ResponseCandidateStream( # noqa: E501
connection,
protocol,
self.get_response_cmd_type(),
)
async with background_asyncio_service(response_stream):
self._manager = ExchangeManager(
connection,
response_stream,
)
yield
async def get_result(
self,
request: TRequestCommand,
normalizer: NormalizerAPI[TResponseCommand, TResult],
result_validator: ValidatorAPI[TResult],
payload_validator: Callable[[TRequestCommand, TResponseCommand], None],
timeout: float = None) -> TResult:
"""
This is a light convenience wrapper around the ExchangeManager's get_result() method.
It makes sure that:
- the manager service is running
- the payload validator is primed with the request payload
"""
# bind the outbound request payload to the payload validator
message_validator = partial(payload_validator, request.payload)
return await self._manager.get_result(
request,
normalizer,
result_validator.validate_result,
message_validator,
self.tracker,
timeout,
)
@classmethod
def get_response_cmd_type(cls) -> Type[TResponseCommand]:
return cls._response_command_type
@classmethod
def get_request_cmd_type(cls) -> Type[TRequestCommand]:
return cls._request_command_type
@property
def is_requesting(self) -> bool:
return self._manager.is_requesting
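# Hypothetical subclass skeleton (all Example* names are placeholders, not
# real commands from this codebase); it only wires together the hooks that
# BaseExchange itself uses -- the command types returned by the two
# classmethods, the tracker_class consumed in __init__, and get_result() for
# the request/response round trip:
#
#     class ExampleExchange(BaseExchange[ExampleRequest, ExampleResponse, bytes]):
#         _request_command_type = ExampleRequest
#         _response_command_type = ExampleResponse
#         tracker_class = ExampleTracker  # assumed attribute used in __init__
#
#         async def __call__(self, payload: bytes) -> bytes:
#             return await self.get_result(
#                 ExampleRequest(payload),
#                 normalizer=ExampleNormalizer(),
#                 result_validator=ExampleValidator(),
#                 payload_validator=example_payload_validator,
#                 timeout=10.0,
#             )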
|
py | 1a3665066fbc1a155c05bd47ce4538a75896c032 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Probe(object):
def __init__(self, initialDelaySeconds=None, periodSeconds=None, timeoutSeconds=None, failureThreshold=None, successThreshold=None, exec=None, httpGet=None, tcpSocket=None):
"""
        :param initialDelaySeconds: (Optional) How long after the container starts before the probe is triggered.
        :param periodSeconds: (Optional) Interval between probes.
        :param timeoutSeconds: (Optional) Timeout for each probe.
        :param failureThreshold: (Optional) Number of consecutive probe failures, after a success, before the probe is considered failed.
        :param successThreshold: (Optional) Number of consecutive probe successes, after a failure, before the probe is considered successful.
        :param exec: (Optional) Run the specified command inside the container; the diagnostic is considered successful if the command exits with return code 0.
        :param httpGet: (Optional) Perform an HTTP GET request against the container's IP address on the specified port and path; the diagnostic is considered successful if the response status code is >= 200 and < 400.
        :param tcpSocket: (Optional) Perform a TCP check against the container's IP address on the specified port; the diagnostic is considered successful if the port is open.
"""
self.initialDelaySeconds = initialDelaySeconds
self.periodSeconds = periodSeconds
self.timeoutSeconds = timeoutSeconds
self.failureThreshold = failureThreshold
self.successThreshold = successThreshold
self.exec = exec
self.httpGet = httpGet
self.tcpSocket = tcpSocket
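if __name__ == "__main__":
    # Illustrative sketch only: a liveness-style probe issuing an HTTP GET.
    # The dict passed as httpGet is an assumption for demonstration; the real
    # SDK may expect a dedicated request-model object for this field.
    liveness = Probe(
        initialDelaySeconds=10,
        periodSeconds=5,
        timeoutSeconds=2,
        failureThreshold=3,
        successThreshold=1,
        httpGet={"path": "/healthz", "port": 8080},
    )
    print(vars(liveness))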
|
py | 1a3665607637ea3aa2f53124a12722029bc74eb1 | import json
from urllib.request import Request, urlopen
from urllib.parse import urljoin, urlencode
from urllib.error import HTTPError, URLError
class RippleDataAPIClient(object):
def __init__(self, node: str = 'https://data.ripple.com'):
self.node = node
def __repr__(self):
return '<RippleDataAPIClient node=%r>' % self.node
def _call(self, url_params: tuple, params: dict) -> dict:
"""
Send request to data API
:param url_params: url parameters which are forming endpoint
:param params: query params
:return: response dict
"""
api_version = "/v2/"
endpoint = "/".join(url_params)
api_url = "".join((api_version, endpoint))
url = urljoin(self.node, api_url)
url = url + "?" + urlencode(params)
req = Request(method='GET', url=url)
try:
with urlopen(req) as res:
res_json = json.loads(res.fp.read().decode('utf-8'))
return res_json
except HTTPError as err:
return {"status": "error", "msg": err}
except URLError as err:
return {"status": "error", "msg": err}
def get_ledger(self, ledger_identifier: str, **query_params) -> dict:
"""
Retrieve a specific Ledger by hash, index, date, or latest validated.
Reference: https://developers.ripple.com/data-api.html#get-ledger
"""
url_params = 'ledgers', ledger_identifier
return self._call(url_params, query_params)
def get_ledger_validations(self, ledger_hash: str, **query_params) -> dict:
"""
Retrieve a any validations recorded for a specific ledger hash. This dataset includes ledger versions
that are outside the validated ledger chain.
Reference: https://developers.ripple.com/data-api.html#get-ledger-validations
"""
endpoint = 'ledgers', ledger_hash, 'validations'
return self._call(endpoint, query_params)
def get_ledger_validation(self, ledger_hash: str,
pubkey: str, **query_params) -> dict:
"""
Retrieve a validation vote recorded for a specific ledger hash by a specific validator.
This dataset includes ledger versions that are outside the validated ledger chain
Reference: https://developers.ripple.com/data-api.html#get-ledger-validation
"""
url_params = 'ledgers', ledger_hash, 'validations', pubkey
return self._call(url_params, query_params)
def get_transaction(self, hash: str, **query_params) -> dict:
"""
Retrieve a specific transaction by its identifying hash.
Reference: https://developers.ripple.com/data-api.html#get-transaction
"""
url_params = 'transactions', hash
return self._call(url_params, query_params)
def get_transactions(self, **query_params) -> dict:
"""
Retrieve transactions by time
Reference: https://developers.ripple.com/data-api.html#get-transactions
"""
return self._call(('transactions', ), query_params)
def get_payments(self, currency: str = None, **query_params) -> dict:
"""
Retrieve Payments over time, where Payments are defined as Payment type transactions where the sender
of the transaction is not also the destination.
Reference: https://developers.ripple.com/data-api.html#get-payments
"""
        url_params = ('payments', )
if currency:
url_params = 'payments', currency
return self._call(url_params, query_params)
def get_exchanges(self, base: str, counter: str, **query_params) -> dict:
"""
Retrieve Exchanges for a given currency pair over time. Results can be returned as individual exchanges
or aggregated to a specific list of intervals
Reference: https://developers.ripple.com/data-api.html#get-exchanges
"""
url_params = 'exchanges', base, counter
return self._call(url_params, query_params)
def get_exchange_rates(self, base: str, counter: str,
**query_params) -> dict:
"""
Retrieve an exchange rate for a given currency pair at a specific time.
Reference: https://developers.ripple.com/data-api.html#get-exchange-rates
"""
url_params = 'exchange_rates', base, counter
return self._call(url_params, query_params)
def normalize(self, **query_params) -> dict:
"""
Convert an amount from one currency and issuer to another, using the network exchange rates.
Reference: https://developers.ripple.com/data-api.html#normalize
"""
return self._call(('normalize', ), query_params)
def get_daily_reports(self, date: str = None, **query_params) -> dict:
"""
Retrieve per account per day aggregated payment summaries
        Reference: https://developers.ripple.com/data-api.html#get-daily-reports
"""
        url_params = ('reports', )
if date:
url_params = 'reports', date
return self._call(url_params, query_params)
def get_stats(self, **query_params) -> dict:
"""
Retrieve statistics about transaction activity in the XRP Ledger, divided into intervals of time.
Reference: https://developers.ripple.com/data-api.html#get-stats
"""
return self._call(('stats', ), query_params)
def get_active_accounts(self, base: str, counter: str,
**query_params) -> dict:
"""
Get information on which accounts are actively trading in a specific currency pair.
Reference: https://developers.ripple.com/data-api.html#get-active-accounts
"""
url_params = 'active_accounts', base, counter
return self._call(url_params, query_params)
def get_exchange_volume(self, **query_params) -> dict:
"""
Get aggregated exchange volume for a given time period.
The API returns results in units of a single display currency rather than many different currencies.
The conversion uses standard rates to and from XRP.
Reference: https://developers.ripple.com/data-api.html#get-exchange-volume
"""
url_params = 'network', 'exchange_volume'
return self._call(url_params, query_params)
def get_payment_volume(self, **query_params) -> dict:
"""
Get aggregated payment volume for a given time period.
The API returns results in units of a single display currency rather than many different currencies.
The conversion uses standard rates to and from XRP.
Reference: https://developers.ripple.com/data-api.html#get-payment-volume
"""
url_params = 'network', 'payment_volume'
return self._call(url_params, query_params)
def get_external_markets(self, **query_params) -> dict:
"""
Get aggregated exchange volume from a list of off ledger exchanges for a specified rolling interval.
The API returns results in units of a single display currency rather than many different currencies.
The conversion uses standard rates to and from XRP.
Reference: https://developers.ripple.com/data-api.html#get-external-markets
"""
url_params = 'network', 'external_markets'
return self._call(url_params, query_params)
def get_xrp_distribution(self, **query_params) -> dict:
"""
Get information on the total amount of XRP in existence and in circulation, by weekly intervals.
The API returns results in units of a single display currency rather than many different currencies.
The conversion uses standard rates to and from XRP.
Reference: https://developers.ripple.com/data-api.html#get-xrp-distribution
"""
url_params = 'network', 'xrp_distribution'
return self._call(url_params, query_params)
def get_top_currencies(self, date: str = None, **query_params) -> dict:
"""
Returns the top currencies on the XRP Ledger, ordered from highest rank to lowest.
Reference: https://developers.ripple.com/data-api.html#get-top-currencies
"""
url_params = 'network', 'top_currencies'
if date:
url_params = 'network', 'top_currencies', date
return self._call(url_params, query_params)
def get_top_markets(self, date: str = None, **query_params) -> dict:
"""
Returns the top exchange markets on the XRP Ledger, ordered from highest rank to lowest.
Reference: https://developers.ripple.com/data-api.html#get-top-markets
"""
url_params = 'network', 'top_markets'
if date:
url_params = 'network', 'top_markets', date
return self._call(url_params, query_params)
def get_transaction_costs(self, **query_params) -> dict:
"""
Returns transaction cost stats per ledger, hour, or day. The data shows the average, minimum, maximum,
and total transaction costs paid for the given interval or ledger.
Reference: https://developers.ripple.com/data-api.html#get-transaction-costs
"""
url_params = 'network', 'fees'
return self._call(url_params, query_params)
def get_fee_stats(self, **query_params) -> dict:
"""
Returns snapshots of the metrics derived from rippled's fee command.
Reference: https://developers.ripple.com/data-api.html#get-fee-stats
"""
url_params = 'network', 'fee_stats'
return self._call(url_params, query_params)
def get_topology(self, **query_params) -> dict:
"""
Get known rippled servers and peer-to-peer connections between them.
Reference: https://developers.ripple.com/data-api.html#get-topology
"""
url_params = 'network', 'topology'
return self._call(url_params, query_params)
def get_topology_nodes(self, **query_params) -> dict:
"""
Get known rippled nodes. (This is a subset of the data returned by the Get Topology method.)
Reference: https://developers.ripple.com/data-api.html#get-topology-nodes
"""
url_params = 'network', 'topology', 'nodes'
return self._call(url_params, query_params)
def get_topology_node(self, pubkey: str, **query_params) -> dict:
"""
Get information about a single rippled server by its node public key (not validator public key).
Reference: https://developers.ripple.com/data-api.html#get-topology-node
"""
url_params = 'network', 'topology', 'nodes', pubkey
return self._call(url_params, query_params)
def get_topology_links(self, **query_params) -> dict:
"""
Get information on peer-to-peer connections between rippled servers.
(This is a subset of the data returned by the Get Topology method.)
Reference: https://developers.ripple.com/data-api.html#get-topology-links
"""
url_params = 'network', 'topology', 'links'
return self._call(url_params, query_params)
def get_validator(self, pubkey: str, **query_params) -> dict:
"""
Get details of a single validator in the consensus network.
Reference: https://developers.ripple.com/data-api.html#get-validator
"""
url_params = 'network', 'validators', pubkey
return self._call(url_params, query_params)
def get_validators(self, **query_params) -> dict:
"""
Get a list of known validators.
Reference: https://developers.ripple.com/data-api.html#get-validators
"""
url_params = 'network', 'validators'
return self._call(url_params, query_params)
def get_validator_validations(self, pubkey: str, **query_params) -> dict:
"""
Retrieve validation votes signed by a specified validator, including votes for ledger
versions that are outside the main ledger chain
Reference: https://developers.ripple.com/data-api.html#get-validator-validations
"""
url_params = 'network', 'validators', pubkey, 'validations'
return self._call(url_params, query_params)
def get_validations(self, **query_params) -> dict:
"""
Retrieve validation votes, including votes for ledger versions that are outside the main ledger chain.
Reference: https://developers.ripple.com/data-api.html#get-validations
"""
url_params = 'network', 'validations'
return self._call(url_params, query_params)
def get_single_validator_reports(
self, pubkey: str, **query_params) -> dict:
"""
Get a single validator's validation vote stats for 24-hour intervals.
Reference: https://developers.ripple.com/data-api.html#get-single-validator-reports
"""
url_params = 'network', 'validators', pubkey, 'reports'
return self._call(url_params, query_params)
def get_daily_validator_reports(self, **query_params) -> dict:
"""
Get a validation vote stats and validator information for all known validators in a 24-hour period.
Reference: https://developers.ripple.com/data-api.html#get-daily-validator-reports
"""
url_params = 'network', 'validator_reports'
return self._call(url_params, query_params)
def get_rippled_versions(self, **query_params) -> dict:
"""
Reports the latest versions of rippled available from the official Ripple Yum repositories.
Reference: https://developers.ripple.com/data-api.html#get-rippled-versions
"""
url_params = 'network', 'rippled_versions'
return self._call(url_params, query_params)
def get_all_gateways(self, **query_params) -> dict:
"""
Get information about known gateways.
Reference: https://developers.ripple.com/data-api.html#get-all-gateways
"""
return self._call(('gateways', ), query_params)
def get_gateway(self, gateway: str, **query_params) -> dict:
"""
Get information about a specific gateway from the Data API's list of known gateways.
Reference: https://developers.ripple.com/data-api.html#get-gateway
"""
url_params = 'gateways', gateway
return self._call(url_params, query_params)
def get_currency_image(self, currencyimage: str, **query_params) -> dict:
"""
Retrieve vector icons for various currencies.
Reference: https://developers.ripple.com/data-api.html#get-currency-image
"""
url_params = 'currencies', currencyimage
return self._call(url_params, query_params)
def get_accounts(self, **query_params) -> dict:
"""
Retrieve information about the creation of new accounts in the XRP Ledger.
Reference: https://developers.ripple.com/data-api.html#get-accounts
"""
return self._call(('accounts', ), query_params)
def get_account(self, address: str, **query_params) -> dict:
"""
Get creation info for a specific ripple account
Reference: https://developers.ripple.com/data-api.html#get-account
"""
url_params = 'accounts', address
return self._call(url_params, query_params)
def get_account_balances(self, address: str, **query_params) -> dict:
"""
Get all balances held or owed by a specific XRP Ledger account.
Reference: https://developers.ripple.com/data-api.html#get-account-balances
"""
url_params = 'accounts', address, 'balances'
return self._call(url_params, query_params)
def get_account_orders(self, address: str, **query_params) -> dict:
"""
Get orders in the order books, placed by a specific account. This does not return orders that have
already been filled.
Reference: https://developers.ripple.com/data-api.html#get-account-orders
"""
url_params = 'accounts', address, 'orders'
return self._call(url_params, query_params)
def get_account_transaction_history(
self, address: str, **query_params) -> dict:
"""
Retrieve a history of transactions that affected a specific account.
This includes all transactions the account sent, payments the account received,
and payments that rippled through the account.
Reference: https://developers.ripple.com/data-api.html#get-account-transaction-history
"""
url_params = 'accounts', address, 'transactions'
return self._call(url_params, query_params)
def get_transaction_by_account_and_sequence(
self, address: str, sequence: str, **query_params) -> dict:
"""
        Retrieve a specific transaction originating from a specified account
Reference: https://developers.ripple.com/data-api.html#get-transaction-by-account-and-sequence
"""
url_params = 'accounts', address, 'transactions', sequence
return self._call(url_params, query_params)
def get_account_payments(self, address: str, **query_params) -> dict:
"""
        Retrieve payments for a specified account
Reference: https://developers.ripple.com/data-api.html#get-account-payments
"""
url_params = 'accounts', address, 'payments'
return self._call(url_params, query_params)
    def get_account_exchanges(self, address: str, base: str = None,
                              counter: str = None, **query_params) -> dict:
"""
Retrieve Exchanges for a given account over time.
Reference: https://developers.ripple.com/data-api.html#get-account-exchanges
"""
url_params = 'accounts', address, 'exchanges'
if base and counter:
url_params = 'accounts', address, 'exchanges', base, counter
return self._call(url_params, query_params)
def get_account_balance_changes(
self, address: str, **query_params) -> dict:
"""
Retrieve Balance changes for a given account over time.
Reference: https://developers.ripple.com/data-api.html#get-account-balance-changes
"""
url_params = 'accounts', address, 'balance_changes'
return self._call(url_params, query_params)
def get_account_reports(
self, address: str, date: str = None, **query_params) -> dict:
"""
Retrieve daily summaries of payment activity for an account.
Reference: https://developers.ripple.com/data-api.html#get-account-reports
"""
url_params = 'accounts', address, 'reports'
if date:
url_params = 'accounts', address, 'reports', date
return self._call(url_params, query_params)
def get_account_transaction_stats(
self, address: str, **query_params) -> dict:
"""
Retrieve daily summaries of transaction activity for an account.
Reference: https://developers.ripple.com/data-api.html#get-account-transaction-stats
"""
url_params = 'accounts', address, 'stats', 'transactions'
return self._call(url_params, query_params)
def get_account_value_stats(self, address: str, **query_params) -> dict:
"""
        Retrieve daily summaries of account value.
Reference: https://developers.ripple.com/data-api.html#get-account-value-stats
"""
url_params = 'accounts', address, 'stats', 'value'
return self._call(url_params, query_params)
def check_api(self, **query_params) -> dict:
"""
Check the health of the API service.
Reference: https://developers.ripple.com/data-api.html#health-check-api
"""
url_params = 'health', 'api'
return self._call(url_params, query_params)
def check_ledger_importer(self, **query_params) -> dict:
"""
Check the health of the Ledger Importer Service.
Reference: https://developers.ripple.com/data-api.html#health-check-ledger-importer
"""
url_params = 'health', 'importer'
return self._call(url_params, query_params)
def check_nodes_etl(self, **query_params) -> dict:
"""
Check the health of the Topology Nodes Extract, Transform, Load (ETL) Service.
Reference: https://developers.ripple.com/data-api.html#health-check-nodes-etl
"""
url_params = 'health', 'nodes_etl'
return self._call(url_params, query_params)
def check_validations_etl(self, **query_params) -> dict:
"""
Check the health of the Validations Extract, Transform, Load (ETL) Service.
Reference: https://developers.ripple.com/data-api.html#health-check-validations-etl
"""
url_params = 'health', 'validations_etl'
return self._call(url_params, query_params)
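if __name__ == "__main__":
    # Minimal usage sketch (assumes network access to the public Data API
    # node; the query parameters shown are illustrative).
    api = RippleDataAPIClient()
    print(api.check_api())
    print(api.get_transactions(limit=2, descending=True))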
|
py | 1a3665ebda8ef270fd9d712ab61d5247545c4a55 | # -*- coding: utf-8 -*-
"""Analysis plugin to look up files in nsrlsvr and tag events."""
import socket
from plaso.analysis import hash_tagging
from plaso.analysis import logger
from plaso.analysis import manager
class NsrlsvrAnalyzer(hash_tagging.HashAnalyzer):
"""Analyzes file hashes by consulting an nsrlsvr instance.
Attributes:
analyses_performed (int): number of analysis batches completed by this
analyzer.
hashes_per_batch (int): maximum number of hashes to analyze at once.
seconds_spent_analyzing (int): number of seconds this analyzer has spent
performing analysis (as opposed to waiting on queues, etc.)
wait_after_analysis (int): number of seconds the analyzer will sleep for
after analyzing a batch of hashes.
"""
_RECEIVE_BUFFER_SIZE = 4096
_SOCKET_TIMEOUT = 3
SUPPORTED_HASHES = ['md5', 'sha1']
def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
"""Initializes an nsrlsvr analyzer thread.
Args:
hash_queue (Queue.queue): contains hashes to be analyzed.
hash_analysis_queue (Queue.queue): that the analyzer will append
HashAnalysis objects this queue.
"""
super(NsrlsvrAnalyzer, self).__init__(
hash_queue, hash_analysis_queue, **kwargs)
self._host = None
self._port = None
self.hashes_per_batch = 100
def _GetSocket(self):
"""Establishes a connection to an nsrlsvr instance.
Returns:
      socket.socket: socket connected to an nsrlsvr instance or None if
a connection cannot be established.
"""
try:
return socket.create_connection(
(self._host, self._port), self._SOCKET_TIMEOUT)
except socket.error as exception:
logger.error('Unable to connect to nsrlsvr with error: {0!s}.'.format(
exception))
def _QueryHash(self, nsrl_socket, digest):
"""Queries nsrlsvr for a specific hash.
Args:
      nsrl_socket (socket.socket): socket of connection to nsrlsvr.
digest (str): hash to look up.
Returns:
bool: True if the hash was found, False if not or None on error.
"""
try:
query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
    except UnicodeEncodeError:
logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))
return False
response = None
try:
nsrl_socket.sendall(query)
response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)
except socket.error as exception:
logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(
exception))
if not response:
return False
# Strip end-of-line characters since they can differ per platform on which
# nsrlsvr is running.
response = response.strip()
# nsrlsvr returns "OK 1" if the has was found or "OK 0" if not.
return response == b'OK 1'
def Analyze(self, hashes):
"""Looks up hashes in nsrlsvr.
Args:
hashes (list[str]): hash values to look up.
Returns:
list[HashAnalysis]: analysis results, or an empty list on error.
"""
logger.debug('Opening connection to {0:s}:{1:d}'.format(
self._host, self._port))
nsrl_socket = self._GetSocket()
if not nsrl_socket:
self.SignalAbort()
return []
hash_analyses = []
for digest in hashes:
response = self._QueryHash(nsrl_socket, digest)
if response is None:
continue
hash_analysis = hash_tagging.HashAnalysis(digest, response)
hash_analyses.append(hash_analysis)
nsrl_socket.close()
logger.debug('Closed connection to {0:s}:{1:d}'.format(
self._host, self._port))
return hash_analyses
def SetHost(self, host):
"""Sets the address or hostname of the server running nsrlsvr.
Args:
host (str): IP address or hostname to query.
"""
self._host = host
def SetPort(self, port):
"""Sets the port where nsrlsvr is listening.
Args:
port (int): port to query.
"""
self._port = port
def TestConnection(self):
"""Tests the connection to nsrlsvr.
Checks if a connection can be set up and queries the server for the
MD5 of an empty file and expects a response. The value of the response
is not checked.
Returns:
bool: True if nsrlsvr instance is reachable.
"""
response = None
nsrl_socket = self._GetSocket()
if nsrl_socket:
response = self._QueryHash(
nsrl_socket, 'd41d8cd98f00b204e9800998ecf8427e')
nsrl_socket.close()
return response is not None
class NsrlsvrAnalysisPlugin(hash_tagging.HashTaggingAnalysisPlugin):
"""Analysis plugin for looking up hashes in nsrlsvr."""
# The NSRL contains files of all different types, and can handle a high load
# so look up all files.
DATA_TYPES = ['fs:stat', 'fs:stat:ntfs']
NAME = 'nsrlsvr'
def __init__(self):
"""Initializes an nsrlsvr analysis plugin."""
super(NsrlsvrAnalysisPlugin, self).__init__(NsrlsvrAnalyzer)
self._label = None
def GenerateLabels(self, hash_information):
"""Generates a list of strings that will be used in the event tag.
Args:
hash_information (bool): whether the analyzer received a response from
nsrlsvr indicating that the hash was present in its loaded NSRL set.
Returns:
list[str]: strings describing the results from nsrlsvr.
"""
if hash_information:
return [self._label]
    # TODO: Re-enable when tagging is removed from the analysis report.
# return ['nsrl_not_present']
return []
def SetLabel(self, label):
"""Sets the tagging label.
Args:
label (str): label to apply to events extracted from files that are
present in nsrlsvr.
"""
self._label = label
def SetHost(self, host):
"""Sets the address or hostname of the server running nsrlsvr.
Args:
host (str): IP address or hostname to query.
"""
self._analyzer.SetHost(host)
def SetPort(self, port):
"""Sets the port where nsrlsvr is listening.
Args:
port (int): port to query.
"""
self._analyzer.SetPort(port)
def TestConnection(self):
"""Tests the connection to nsrlsvr.
Returns:
bool: True if nsrlsvr instance is reachable.
"""
return self._analyzer.TestConnection()
manager.AnalysisPluginManager.RegisterPlugin(NsrlsvrAnalysisPlugin)
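if __name__ == '__main__':
  # Standalone sketch of the wire protocol spoken by NsrlsvrAnalyzer above
  # (assumes an nsrlsvr instance on localhost:9120 -- the port is an
  # assumption; adjust to match your deployment).
  example_socket = socket.create_connection(('localhost', 9120), 3)
  example_socket.sendall(b'QUERY d41d8cd98f00b204e9800998ecf8427e\n')
  print(example_socket.recv(4096).strip())  # b'OK 1' if known, b'OK 0' if not
  example_socket.close()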
|
py | 1a3666e184aad1ac63baecaa81952345dac1f0f6 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import datetime as dt
import itertools
import pydoc
import tenacity
import weakref
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import reflection
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import identifier
from heat.common import short_id
from heat.common import timeutils
from heat.engine import attributes
from heat.engine.cfn import template as cfn_tmpl
from heat.engine import clients
from heat.engine import environment
from heat.engine import event
from heat.engine import function
from heat.engine.hot import template as hot_tmpl
from heat.engine import node_data
from heat.engine import properties
from heat.engine import resources
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import status
from heat.engine import support
from heat.engine import sync_point
from heat.engine import template
from heat.objects import resource as resource_objects
from heat.objects import resource_data as resource_data_objects
from heat.objects import resource_properties_data as rpd_objects
from heat.rpc import client as rpc_client
cfg.CONF.import_opt('action_retry_limit', 'heat.common.config')
cfg.CONF.import_opt('observe_on_update', 'heat.common.config')
cfg.CONF.import_opt('error_wait_time', 'heat.common.config')
LOG = logging.getLogger(__name__)
datetime = dt.datetime
def _register_class(resource_type, resource_class):
resources.global_env().register_class(resource_type, resource_class)
# Attention developers about to move/delete this: STOP IT!!!
UpdateReplace = exception.UpdateReplace
# Attention developers about to move this: STOP IT!!!
class NoActionRequired(Exception):
"""Exception raised when a signal is ignored.
Resource subclasses should raise this exception from handle_signal() to
suppress recording of an event corresponding to the signal.
"""
def __init__(self, res_name='Unknown', reason=''):
msg = (_("The resource %(res)s could not perform "
"scaling action: %(reason)s") %
{'res': res_name, 'reason': reason})
super(Exception, self).__init__(six.text_type(msg))
class PollDelay(Exception):
"""Exception to delay polling of the resource.
This exception may be raised by a Resource subclass's check_*_complete()
methods to indicate that it need not be polled again immediately. If this
exception is raised, the check_*_complete() method will not be called
again until the nth time that the resource becomes eligible for polling.
A PollDelay period of 1 is equivalent to returning False.
"""
def __init__(self, period):
assert period >= 1
self.period = period
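# Illustrative (hypothetical) use of PollDelay from a resource plugin's
# check_*_complete method: poll an external service only on every third
# scheduler pass instead of on every pass.
#
#     def check_create_complete(self, server_id):
#         if not self._is_ready(server_id):   # _is_ready is a placeholder
#             raise PollDelay(3)
#         return True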
@six.python_2_unicode_compatible
class Resource(status.ResourceStatus):
BASE_ATTRIBUTES = (SHOW, ) = (attributes.SHOW_ATTR, )
LOCK_ACTIONS = (
LOCK_NONE, LOCK_ACQUIRE, LOCK_RELEASE, LOCK_RESPECT,
) = (
None, 1, -1, 0,
)
# If True, this resource must be created before it can be referenced.
strict_dependency = True
# Resource implementation set this to the subset of resource properties
# supported for handle_update, used by update_template_diff_properties
update_allowed_properties = ()
# Resource implementations set this to the name: description dictionary
# that describes the appropriate resource attributes
attributes_schema = {}
# Resource implementations set this to update policies
update_policy_schema = {}
# Default entity of resource, which is used for during resolving
# show attribute
entity = None
# Description dictionary, that describes the common attributes for all
# resources
base_attributes_schema = {
SHOW: attributes.Schema(
_("Detailed information about resource."),
cache_mode=attributes.Schema.CACHE_NONE,
type=attributes.Schema.MAP
)
}
# If True, this resource may perform authenticated API requests
# throughout its lifecycle
requires_deferred_auth = False
# Limit to apply to physical_resource_name() size reduction algorithm.
# If set to None no limit will be applied.
physical_resource_name_limit = 255
support_status = support.SupportStatus()
# Default name to use for calls to self.client()
default_client_name = None
# Required service extension for this resource
required_service_extension = None
# no signal actions
no_signal_actions = (status.ResourceStatus.SUSPEND,
status.ResourceStatus.DELETE)
# Whether all other resources need a metadata_update() after
# a signal to this resource
signal_needs_metadata_updates = True
def __new__(cls, name, definition, stack):
"""Create a new Resource of the appropriate class for its type."""
assert isinstance(definition, rsrc_defn.ResourceDefinition)
if cls != Resource:
# Call is already for a subclass, so pass it through
ResourceClass = cls
else:
registry = stack.env.registry
ResourceClass = registry.get_class_to_instantiate(
definition.resource_type,
resource_name=name)
assert issubclass(ResourceClass, Resource)
return super(Resource, cls).__new__(ResourceClass)
@classmethod
def _validate_service_availability(cls, context, resource_type):
try:
(svc_available, reason) = cls.is_service_available(context)
except Exception as exc:
LOG.exception("Resource type %s unavailable",
resource_type)
ex = exception.ResourceTypeUnavailable(
resource_type=resource_type,
service_name=cls.default_client_name,
reason=six.text_type(exc))
raise ex
else:
if not svc_available:
ex = exception.ResourceTypeUnavailable(
resource_type=resource_type,
service_name=cls.default_client_name,
reason=reason)
LOG.info(six.text_type(ex))
raise ex
def __init__(self, name, definition, stack):
def _validate_name(res_name):
if '/' in res_name:
message = _('Resource name may not contain "/"')
raise exception.StackValidationFailed(message=message)
_validate_name(name)
self.stack = stack
self.context = stack.context
self.name = name
self.t = definition
self.reparse(client_resolve=False)
self.update_policy = self.t.update_policy(self.update_policy_schema,
self.context)
self._update_allowed_properties = self.calc_update_allowed(
self.properties)
self.attributes_schema.update(self.base_attributes_schema)
self.attributes = attributes.Attributes(self.name,
self.attributes_schema,
self._make_resolver(
weakref.ref(self)))
self.abandon_in_progress = False
self.resource_id = None
        # If the stack is being deleted, assume we've already been deleted;
        # likewise, if the resource has not been created yet and the stack is
        # rolling back, set the resource action to rollback.
if stack.action == stack.DELETE or stack.action == stack.ROLLBACK:
self.action = stack.action
else:
self.action = self.INIT
self.status = self.COMPLETE
self.status_reason = ''
self.id = None
self.uuid = None
self._data = None
self._attr_data_id = None
self._rsrc_metadata = None
self._rsrc_prop_data_id = None
self._stored_properties_data = None
self.created_time = stack.created_time
self.updated_time = stack.updated_time
self._rpc_client = None
self.needed_by = []
self.requires = []
self.replaces = None
self.replaced_by = None
self.current_template_id = None
self.root_stack_id = None
self._calling_engine_id = None
self._atomic_key = None
self.converge = False
if not self.stack.in_convergence_check:
resource = stack.db_resource_get(name)
if resource:
self._load_data(resource)
else:
proxy = self.stack.defn[self.name]
node_data = proxy._resource_data
if node_data is not None:
self.action, self.status = proxy.state
self.id = node_data.primary_key
self.uuid = node_data.uuid
def rpc_client(self):
"""Return a client for making engine RPC calls."""
if not self._rpc_client:
self._rpc_client = rpc_client.EngineClient()
return self._rpc_client
def _load_data(self, resource):
"""Load the resource state from its DB representation."""
self.resource_id = resource.physical_resource_id
self.action = resource.action
self.status = resource.status
self.status_reason = resource.status_reason
self.id = resource.id
self.uuid = resource.uuid
try:
self._data = resource_data_objects.ResourceData.get_all(
self, resource.data)
except exception.NotFound:
self._data = {}
self.attributes.cached_attrs = resource.attr_data or None
self._attr_data_id = resource.attr_data_id
self._rsrc_metadata = resource.rsrc_metadata
self._stored_properties_data = resource.properties_data
self._rsrc_prop_data_id = resource.rsrc_prop_data_id
self.created_time = resource.created_at
self.updated_time = resource.updated_at
self.needed_by = resource.needed_by
self.requires = resource.requires
self.replaces = resource.replaces
self.replaced_by = resource.replaced_by
self.current_template_id = resource.current_template_id
self.root_stack_id = resource.root_stack_id
self._atomic_key = resource.atomic_key
@property
def external_id(self):
return self.t.external_id()
@classmethod
def getdoc(cls):
if cls.__doc__ is None:
return _('No description available')
return pydoc.getdoc(cls)
@property
def stack(self):
stack = self._stackref()
assert stack is not None, "Need a reference to the Stack object"
return stack
@stack.setter
def stack(self, stack):
self._stackref = weakref.ref(stack)
@classmethod
def load(cls, context, resource_id, current_traversal, is_update, data):
"""Load a specified resource from the database to check.
Returns a tuple of the Resource, the StackDefinition corresponding to
the resource's ResourceDefinition (i.e. the one the resource was last
updated to if it has already been created, or the one it will be
created with if it hasn't been already), and the Stack containing the
latest StackDefinition (i.e. the one that the latest traversal is
        updating to).
The latter two must remain in-scope, because the Resource holds weak
references to them.
"""
from heat.engine import stack as stack_mod
db_res = resource_objects.Resource.get_obj(context, resource_id)
curr_stack = stack_mod.Stack.load(context, stack_id=db_res.stack_id,
cache_data=data)
initial_stk_defn = latest_stk_defn = curr_stack.defn
if (db_res.current_template_id != curr_stack.t.id and
(db_res.action != cls.INIT or
not is_update or
current_traversal != curr_stack.current_traversal)):
# load the definition associated with the resource's template
current_template_id = db_res.current_template_id
current_template = template.Template.load(context,
current_template_id)
initial_stk_defn = curr_stack.defn.clone_with_new_template(
current_template,
curr_stack.identifier())
curr_stack.defn = initial_stk_defn
res_defn = initial_stk_defn.resource_definition(db_res.name)
res_type = initial_stk_defn.env.registry.get_class_to_instantiate(
res_defn.resource_type, resource_name=db_res.name)
# If the resource type has changed and the new one is a valid
# substitution, use that as the class to instantiate.
if is_update and (latest_stk_defn is not initial_stk_defn):
try:
new_res_defn = latest_stk_defn.resource_definition(db_res.name)
except KeyError:
pass
else:
new_registry = latest_stk_defn.env.registry
new_res_type = new_registry.get_class_to_instantiate(
new_res_defn.resource_type, resource_name=db_res.name)
if res_type.check_is_substituted(new_res_type):
res_type = new_res_type
# Load only the resource in question; don't load all resources
# by invoking stack.resources. Maintain light-weight stack.
resource = res_type(db_res.name, res_defn, curr_stack)
resource._load_data(db_res)
curr_stack.defn = latest_stk_defn
return resource, initial_stk_defn, curr_stack
def make_replacement(self, new_tmpl_id):
"""Create a replacement resource in the database.
Returns the DB ID of the new resource, or None if the new resource
cannot be created (generally because the template ID does not exist).
Raises UpdateInProgress if another traversal has already locked the
current resource.
"""
# 1. create the replacement with "replaces" = self.id
# Don't set physical_resource_id so that a create is triggered.
rs = {'stack_id': self.stack.id,
'name': self.name,
'rsrc_prop_data_id': None,
'needed_by': self.needed_by,
'requires': self.requires,
'replaces': self.id,
'action': self.INIT,
'status': self.COMPLETE,
'current_template_id': new_tmpl_id,
'stack_name': self.stack.name,
'root_stack_id': self.root_stack_id}
update_data = {'status': self.COMPLETE}
# Retry in case a signal has updated the atomic_key
attempts = max(cfg.CONF.client_retry_limit, 0) + 1
def prepare_attempt(fn, attempt):
if attempt > 1:
res_obj = resource_objects.Resource.get_obj(
self.context, self.id)
if (res_obj.engine_id is not None or
res_obj.updated_at != self.updated_time):
raise exception.UpdateInProgress(resource_name=self.name)
self._atomic_key = res_obj.atomic_key
@tenacity.retry(
stop=tenacity.stop_after_attempt(attempts),
retry=tenacity.retry_if_exception_type(
exception.UpdateInProgress),
before=prepare_attempt,
wait=tenacity.wait_random(max=2),
reraise=True)
def create_replacement():
return resource_objects.Resource.replacement(self.context,
self.id,
update_data,
rs,
self._atomic_key)
new_rs = create_replacement()
if new_rs is None:
return None
self._incr_atomic_key(self._atomic_key)
self.replaced_by = new_rs.id
return new_rs.id
def reparse(self, client_resolve=True):
"""Reparse the resource properties.
        The optional client_resolve flag specifies whether properties should
        be resolved by doing client lookups during translation.
"""
self.properties = self.t.properties(self.properties_schema,
self.context)
self.translate_properties(self.properties, client_resolve)
def calc_update_allowed(self, props):
update_allowed_set = set(self.update_allowed_properties)
for (psk, psv) in six.iteritems(props.props):
if psv.update_allowed():
update_allowed_set.add(psk)
return update_allowed_set
def __eq__(self, other):
"""Allow == comparison of two resources."""
# For the purposes of comparison, we declare two resource objects
# equal if their names and resolved templates are the same
if isinstance(other, Resource):
return ((self.name == other.name) and
(self.t.freeze() == other.t.freeze()))
return NotImplemented
def __ne__(self, other):
"""Allow != comparison of two resources."""
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
return id(self)
def metadata_get(self, refresh=False):
if refresh:
self._rsrc_metadata = None
if self.id is None or self.action == self.INIT:
return self.t.metadata()
if self._rsrc_metadata is not None:
return self._rsrc_metadata
rs = resource_objects.Resource.get_obj(self.stack.context, self.id,
refresh=True,
fields=('rsrc_metadata', ))
self._rsrc_metadata = rs.rsrc_metadata
return rs.rsrc_metadata
@resource_objects.retry_on_conflict
def metadata_set(self, metadata, merge_metadata=None):
"""Write new metadata to the database.
The caller may optionally provide a merge_metadata() function, which
takes two arguments - the metadata passed to metadata_set() and the
current metadata of the resource - and returns the merged metadata to
write. If merge_metadata is not provided, the metadata passed to
metadata_set() is written verbatim, overwriting any existing metadata.
If a race condition is detected, the write will be retried with the new
result of merge_metadata() (if it is supplied) or the verbatim data (if
it is not).
"""
if self.id is None or self.action == self.INIT:
raise exception.ResourceNotAvailable(resource_name=self.name)
refresh = merge_metadata is not None
db_res = resource_objects.Resource.get_obj(
self.stack.context, self.id, refresh=refresh,
fields=('name', 'rsrc_metadata', 'atomic_key', 'engine_id',
'action', 'status'))
if db_res.action == self.DELETE:
self._db_res_is_deleted = True
LOG.debug("resource %(name)s, id: %(id)s is DELETE_%(st)s, "
"not setting metadata",
{'name': self.name, 'id': self.id, 'st': db_res.status})
raise exception.ResourceNotAvailable(resource_name=self.name)
LOG.debug('Setting metadata for %s', six.text_type(self))
if refresh:
metadata = merge_metadata(metadata, db_res.rsrc_metadata)
if db_res.update_metadata(metadata):
self._incr_atomic_key(db_res.atomic_key)
self._rsrc_metadata = metadata
def handle_metadata_reset(self):
"""Default implementation; should be overridden by resources.
        Scaling policy and scaling group resources override this method to
        reset their metadata, because it might otherwise be left in a wrong
        state ('scaling_in_progress' stuck at True) if the engine restarts
        while scaling.
"""
pass
@classmethod
def set_needed_by(cls, db_rsrc, needed_by, expected_engine_id=None):
if db_rsrc:
db_rsrc.select_and_update(
{'needed_by': needed_by},
atomic_key=db_rsrc.atomic_key,
expected_engine_id=expected_engine_id
)
@classmethod
def set_requires(cls, db_rsrc, requires):
if db_rsrc:
db_rsrc.update_and_save(
{'requires': requires}
)
def _break_if_required(self, action, hook):
"""Block the resource until the hook is cleared if there is one."""
if self.stack.env.registry.matches_hook(self.name, hook):
self.trigger_hook(hook)
self._add_event(self.action, self.status,
_("%(a)s paused until Hook %(h)s is cleared")
% {'a': action, 'h': hook})
LOG.info('Reached hook on %s', self)
while self.has_hook(hook):
try:
yield
except BaseException as exc:
self.clear_hook(hook)
self._add_event(
self.action, self.status,
"Failure occurred while waiting.")
if (isinstance(exc, AssertionError) or
not isinstance(exc, Exception)):
raise
def has_nested(self):
"""Return True if the resource has an existing nested stack.
For most resource types, this will always return False. StackResource
subclasses return True when appropriate. Resource subclasses that may
return True must also provide a nested_identifier() method to return
the identifier of the nested stack, and a nested() method to return a
Stack object for the nested stack.
"""
return False
def get_nested_parameters_stack(self):
"""Return the nested stack for schema validation.
Regular resources don't have such a thing.
"""
return
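    # Hooks are persisted as resource data entries whose value is the string
    # "True"; has_hook() clears the cached data first so that it never acts
    # on a stale copy.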
def has_hook(self, hook):
# Clear the cache to make sure the data is up to date:
self._data = None
return self.data().get(hook) == "True"
def trigger_hook(self, hook):
self.data_set(hook, "True")
def clear_hook(self, hook):
self.data_delete(hook)
def type(self):
return self.t.resource_type
def has_interface(self, resource_type):
"""Check if resource is mapped to resource_type or is "resource_type".
Check to see if this resource is either mapped to resource_type
or is a "resource_type".
"""
if self.type() == resource_type:
return True
try:
ri = self.stack.env.get_resource_info(self.type(),
self.name)
except exception.EntityNotFound:
return False
else:
return ri.name == resource_type
def identifier(self):
"""Return an identifier for this resource."""
return identifier.ResourceIdentifier(resource_name=self.name,
**self.stack.identifier())
def frozen_definition(self):
"""Return a frozen ResourceDefinition with stored property values.
The returned definition will contain the property values read from the
database, and will have all intrinsic functions resolved (note that
this makes it useless for calculating dependencies).
"""
if self._stored_properties_data is not None:
args = {'properties': self._stored_properties_data}
else:
args = {}
return self.t.freeze(**args)
@contextlib.contextmanager
def frozen_properties(self):
"""Context manager to use the frozen property values from the database.
The live property values are always substituted back when the context
ends.
"""
live_props = self.properties
props = self.frozen_definition().properties(self.properties_schema,
self.context)
try:
self.properties = props
yield props
finally:
self.properties = live_props
def update_template_diff(self, after, before):
"""Returns the difference between the before and after json snippets.
If something has been removed in after which exists in before we set it
to None.
"""
return after - before
def update_template_diff_properties(self, after_props, before_props):
"""Return changed Properties between the before and after properties.
If any property having immutable as True is updated, raises
NotSupported error.
If any properties have changed which are not in
update_allowed_properties, raises UpdateReplace.
"""
update_allowed_set = self.calc_update_allowed(after_props)
immutable_set = set()
for (psk, psv) in six.iteritems(after_props.props):
if psv.immutable():
immutable_set.add(psk)
def prop_changed(key):
try:
before = before_props.get(key)
except (TypeError, ValueError) as exc:
# We shouldn't get here usually, but there is a known issue
# with template resources and new parameters in non-convergence
# stacks (see bug 1543685). The error should be harmless
# because we're on the before properties, which have presumably
# already been validated.
LOG.warning('Ignoring error in old property value '
'%(prop_name)s: %(msg)s',
{'prop_name': key, 'msg': six.text_type(exc)})
return True
return before != after_props.get(key)
# Create a set of keys which differ (or are missing/added)
changed_properties_set = set(k for k in after_props if prop_changed(k))
# Create a list of updated properties offending property immutability
update_replace_forbidden = [k for k in changed_properties_set
if k in immutable_set]
if update_replace_forbidden:
msg = _("Update to properties %(props)s of %(name)s (%(res)s)"
) % {'props': ", ".join(sorted(update_replace_forbidden)),
'res': self.type(), 'name': self.name}
raise exception.NotSupported(feature=msg)
if changed_properties_set and self.needs_replace_with_prop_diff(
changed_properties_set,
after_props,
before_props):
raise UpdateReplace(self)
if not changed_properties_set.issubset(update_allowed_set):
raise UpdateReplace(self.name)
return dict((k, after_props.get(k)) for k in changed_properties_set)
def __str__(self):
class_name = reflection.get_class_name(self, fully_qualified=False)
if self.stack.id is not None:
if self.resource_id is not None:
text = '%s "%s" [%s] %s' % (class_name, self.name,
self.resource_id,
six.text_type(self.stack))
else:
text = '%s "%s" %s' % (class_name, self.name,
six.text_type(self.stack))
else:
text = '%s "%s"' % (class_name, self.name)
return six.text_type(text)
def add_explicit_dependencies(self, deps):
"""Add all dependencies explicitly specified in the template.
The deps parameter is a Dependencies object to which dependency pairs
are added.
"""
for dep in self.t.dependencies(self.stack):
deps += (self, dep)
deps += (self, None)
def add_dependencies(self, deps):
"""Add implicit dependencies specific to the resource type.
Some resource types may have implicit dependencies on other resources
in the same stack that are not linked by a property value (that would
be set using get_resource or get_attr for example, thus creating an
explicit dependency). Such dependencies are opaque to the user and
should be avoided wherever possible, however in some circumstances they
are required due to magic in the underlying API.
The deps parameter is a Dependencies object to which dependency pairs
may be added.
"""
return
def required_by(self):
"""List of resources that require this one as a dependency.
Returns a list of names of resources that depend on this resource
directly.
"""
try:
reqd_by = self.stack.dependencies.required_by(self)
except KeyError:
if self.stack.convergence:
# for convergence, fall back to building from needed_by
needed_by_ids = self.needed_by or set()
reqd_by = [r for r in self.stack.resources.values()
if r.id in needed_by_ids]
else:
LOG.error('Getting required_by list for Resource not in '
'dependency graph.')
return []
return [r.name for r in reqd_by]
def client(self, name=None, version=None):
client_name = name or self.default_client_name
assert client_name, "Must specify client name"
return self.stack.clients.client(client_name, version)
def client_plugin(self, name=None):
client_name = name or self.default_client_name
assert client_name, "Must specify client name"
return self.stack.clients.client_plugin(client_name)
@classmethod
def is_service_available(cls, context):
        # NOTE(kanagaraj-manickam): return True for cases where the resource
        # does not have an endpoint, such as RandomString and OS::Heat
        # resources, as they are implemented within the engine.
if cls.default_client_name is None:
return (True, None)
client_plugin = clients.Clients(context).client_plugin(
cls.default_client_name)
if not client_plugin:
raise exception.ClientNotAvailable(
client_name=cls.default_client_name)
service_types = client_plugin.service_types
if not service_types:
return (True, None)
        # NOTE(kanagaraj-manickam): if one of the service_types exists
        # in keystone, then consider the service available.
for service_type in service_types:
endpoint_exists = client_plugin.does_endpoint_exist(
service_type=service_type,
service_name=cls.default_client_name)
if endpoint_exists:
req_extension = cls.required_service_extension
is_ext_available = (
not req_extension or client_plugin.has_extension(
req_extension))
if is_ext_available:
return (True, None)
else:
reason = _('Required extension {0} in {1} service '
'is not available.')
reason = reason.format(req_extension,
cls.default_client_name)
else:
reason = _('{0} {1} endpoint is not in service catalog.')
reason = reason.format(cls.default_client_name, service_type)
return (False, reason)
def keystone(self):
return self.client('keystone')
def nova(self):
return self.client('nova')
def swift(self):
return self.client('swift')
def neutron(self):
return self.client('neutron')
def cinder(self):
return self.client('cinder')
def trove(self):
return self.client('trove')
def ceilometer(self):
return self.client('ceilometer')
def heat(self):
return self.client('heat')
def glance(self):
return self.client('glance')
def _incr_atomic_key(self, last_key):
if last_key is None:
self._atomic_key = 1
else:
self._atomic_key = last_key + 1
def _should_lock_on_action(self, action):
"""Return whether we should take a resource-level lock for an action.
In the legacy path, we always took a lock at the Stack level and never
at the Resource level. In convergence, we lock at the Resource level
for most operations. However, there are currently some exceptions:
the SUSPEND, RESUME, SNAPSHOT, and CHECK actions, and stack abandon.
"""
return (self.stack.convergence and
not self.abandon_in_progress and
action in {self.ADOPT,
self.CREATE,
self.UPDATE,
self.ROLLBACK,
self.DELETE})
@contextlib.contextmanager
def _action_recorder(self, action, expected_exceptions=tuple()):
"""Return a context manager to record the progress of an action.
Upon entering the context manager, the state is set to IN_PROGRESS.
Upon exiting, the state will be set to COMPLETE if no exception was
raised, or FAILED otherwise. Non-exit exceptions will be translated
to ResourceFailure exceptions.
Expected exceptions are re-raised, with the Resource moved to the
COMPLETE state.
"""
attempts = 1
first_iter = [True] # work around no nonlocal in py27
if self.stack.convergence:
if self._should_lock_on_action(action):
lock_acquire = self.LOCK_ACQUIRE
lock_release = self.LOCK_RELEASE
else:
lock_acquire = lock_release = self.LOCK_RESPECT
if action != self.CREATE:
attempts += max(cfg.CONF.client_retry_limit, 0)
else:
lock_acquire = lock_release = self.LOCK_NONE
# retry for convergence DELETE or UPDATE if we get the usual
# lock-acquire exception of exception.UpdateInProgress
@tenacity.retry(
stop=tenacity.stop_after_attempt(attempts),
retry=tenacity.retry_if_exception_type(
exception.UpdateInProgress),
wait=tenacity.wait_random(max=2),
reraise=True)
def set_in_progress():
if not first_iter[0]:
res_obj = resource_objects.Resource.get_obj(
self.context, self.id)
self._atomic_key = res_obj.atomic_key
else:
first_iter[0] = False
self.state_set(action, self.IN_PROGRESS, lock=lock_acquire)
try:
set_in_progress()
yield
except exception.UpdateInProgress as ex:
with excutils.save_and_reraise_exception():
LOG.info('Update in progress for %s', self.name)
except expected_exceptions as ex:
with excutils.save_and_reraise_exception():
self.state_set(action, self.COMPLETE, six.text_type(ex),
lock=lock_release)
LOG.debug('%s', six.text_type(ex))
except Exception as ex:
LOG.info('%(action)s: %(info)s',
{"action": action,
"info": six.text_type(self)},
exc_info=True)
failure = exception.ResourceFailure(ex, self, action)
self.state_set(action, self.FAILED, six.text_type(failure),
lock=lock_release)
raise failure
except BaseException as exc:
with excutils.save_and_reraise_exception():
try:
reason = six.text_type(exc)
msg = '%s aborted' % action
if reason:
msg += ' (%s)' % reason
self.state_set(action, self.FAILED, msg,
lock=lock_release)
except Exception:
LOG.exception('Error marking resource as failed')
else:
self.state_set(action, self.COMPLETE, lock=lock_release)
def action_handler_task(self, action, args=None, action_prefix=None):
"""A task to call the Resource subclass's handler methods for action.
Calls the handle_<ACTION>() method for the given action and then calls
the check_<ACTION>_complete() method with the result in a loop until it
returns True. If the methods are not provided, the call is omitted.
Any args provided are passed to the handler.
If a prefix is supplied, the handler method handle_<PREFIX>_<ACTION>()
is called instead.
"""
args = args or []
handler_action = action.lower()
check = getattr(self, 'check_%s_complete' % handler_action, None)
if action_prefix:
handler_action = '%s_%s' % (action_prefix.lower(), handler_action)
handler = getattr(self, 'handle_%s' % handler_action, None)
if callable(handler):
handler_data = handler(*args)
yield
if callable(check):
try:
while True:
try:
done = check(handler_data)
except PollDelay as delay:
yield delay.period
else:
if done:
break
else:
yield
except Exception:
raise
except: # noqa
with excutils.save_and_reraise_exception():
canceller = getattr(
self,
'handle_%s_cancel' % handler_action,
None
)
if callable(canceller):
try:
canceller(handler_data)
except Exception:
LOG.exception(
'Error cancelling resource %s',
action
)
@scheduler.wrappertask
def _do_action(self, action, pre_func=None, resource_data=None):
"""Perform a transition to a new state via a specified action.
        Action should be e.g. self.CREATE or self.UPDATE; we set the status
        based on this, and the transition is handled by calling the
        corresponding handle_* and check_*_complete functions.
        Note that pre_func is an optional function reference which will
        be called before the handle_<action> function.
        If the resource does not declare a check_$action_complete function,
        we declare COMPLETE status as soon as the handle_$action call has
        finished; if no handle_$action function is declared, then we do
        nothing, which is useful e.g. if the resource requires no action
        for a given state transition.
"""
assert action in self.ACTIONS, 'Invalid action %s' % action
with self._action_recorder(action):
if callable(pre_func):
pre_func()
handler_args = [resource_data] if resource_data is not None else []
yield self.action_handler_task(action, args=handler_args)
def _update_stored_properties(self):
old_props = self._stored_properties_data
self._stored_properties_data = function.resolve(self.properties.data)
if self._stored_properties_data != old_props:
self._rsrc_prop_data_id = None
self.attributes.reset_resolved_values()
def referenced_attrs(self, stk_defn=None,
in_resources=True, in_outputs=True,
load_all=False):
"""Return the set of all attributes referenced in the template.
This enables the resource to calculate which of its attributes will
be used. By default, attributes referenced in either other resources
or outputs will be included. Either can be excluded by setting the
`in_resources` or `in_outputs` parameters to False. To limit to a
subset of outputs, pass an iterable of the output names to examine
for the `in_outputs` parameter.
The set of referenced attributes is calculated from the
StackDefinition object provided, or from the stack's current
definition if none is passed.
"""
if stk_defn is None:
stk_defn = self.stack.defn
def get_dep_attrs(source):
return set(itertools.chain.from_iterable(s.dep_attrs(self.name,
load_all)
for s in source))
refd_attrs = set()
if in_resources:
enabled_resources = stk_defn.enabled_rsrc_names()
refd_attrs |= get_dep_attrs(stk_defn.resource_definition(r_name)
for r_name in enabled_resources)
subset_outputs = isinstance(in_outputs, collections.Iterable)
if subset_outputs or in_outputs:
if not subset_outputs:
in_outputs = stk_defn.enabled_output_names()
refd_attrs |= get_dep_attrs(stk_defn.output_definition(op_name)
for op_name in in_outputs)
if attributes.ALL_ATTRIBUTES in refd_attrs:
refd_attrs.remove(attributes.ALL_ATTRIBUTES)
refd_attrs |= (set(self.attributes) - {self.SHOW})
return refd_attrs
def node_data(self, stk_defn=None, for_resources=True, for_outputs=False):
"""Return a NodeData object representing the resource.
The NodeData object returned contains basic data about the resource,
including its name, ID and state, as well as its reference ID and any
attribute values that are used.
By default, those attribute values that are referenced by other
resources are included. These can be ignored by setting the
for_resources parameter to False. If the for_outputs parameter is
True, those attribute values that are referenced by stack outputs are
included. If the for_outputs parameter is an iterable of output names,
only those attribute values referenced by the specified stack outputs
are included.
The set of referenced attributes is calculated from the
StackDefinition object provided, or from the stack's current
definition if none is passed.
After calling this method, the resource's attribute cache is
populated with any cacheable attribute values referenced by stack
outputs, even if they are not also referenced by other resources.
"""
def get_attrs(attrs, cacheable_only=False):
for attr in attrs:
path = (attr,) if isinstance(attr, six.string_types) else attr
if (cacheable_only and
(self.attributes.get_cache_mode(path[0]) ==
attributes.Schema.CACHE_NONE)):
continue
if self.action == self.INIT:
if (path[0] in self.attributes or
(type(self).get_attribute != Resource.get_attribute or
type(self).FnGetAtt != Resource.FnGetAtt)):
# TODO(ricolin) make better placeholder values here
yield attr, None
else:
try:
yield attr, self.FnGetAtt(*path)
except exception.InvalidTemplateAttribute as ita:
# Attribute doesn't exist, so don't store it. Whatever
# tries to access it will get another
# InvalidTemplateAttribute exception at that point
LOG.info('%s', ita)
except Exception as exc:
# Store the exception that occurred. It will be
# re-raised when something tries to access it, or when
# we try to serialise the NodeData.
yield attr, exc
load_all = not self.stack.in_convergence_check
dep_attrs = self.referenced_attrs(stk_defn,
in_resources=for_resources,
in_outputs=for_outputs,
load_all=load_all)
# Ensure all attributes referenced in outputs get cached
if for_outputs is False and self.stack.convergence:
out_attrs = self.referenced_attrs(stk_defn, in_resources=False,
load_all=load_all)
for e in get_attrs(out_attrs - dep_attrs, cacheable_only=True):
pass
# Calculate attribute values *before* reference ID, to potentially
# save an extra RPC call in TemplateResource
attribute_values = dict(get_attrs(dep_attrs))
return node_data.NodeData(self.id, self.name, self.uuid,
self.FnGetRefId(), attribute_values,
self.action, self.status)
def preview(self):
"""Default implementation of Resource.preview.
This method should be overridden by child classes for specific
behavior.
"""
return self
def create_convergence(self, template_id, resource_data, engine_id,
timeout, progress_callback=None):
"""Creates the resource by invoking the scheduler TaskRunner."""
self._calling_engine_id = engine_id
self.requires = list(
set(data.primary_key for data in resource_data.values()
if data is not None)
)
self.current_template_id = template_id
if self.stack.adopt_stack_data is None:
runner = scheduler.TaskRunner(self.create)
else:
adopt_data = self.stack._adopt_kwargs(self)
runner = scheduler.TaskRunner(self.adopt, **adopt_data)
runner(timeout=timeout, progress_callback=progress_callback)
def validate_external(self):
if self.external_id is not None:
try:
self.resource_id = self.external_id
self._show_resource()
except Exception as ex:
if self.client_plugin().is_not_found(ex):
error_message = (_("Invalid external resource: Resource "
"%(external_id)s (%(type)s) can not "
"be found.") %
{'external_id': self.external_id,
'type': self.type()})
raise exception.StackValidationFailed(
message="%s" % error_message)
raise
@scheduler.wrappertask
def create(self):
"""Create the resource.
Subclasses should provide a handle_create() method to customise
creation.
"""
action = self.CREATE
if (self.action, self.status) != (self.INIT, self.COMPLETE):
exc = exception.Error(_('State %s invalid for create')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
if self.external_id is not None:
yield self._do_action(self.ADOPT,
resource_data={
'resource_id': self.external_id})
self.check()
return
# This method can be called when we replace a resource, too. In that
# case, a hook has already been dealt with in `Resource.update` so we
# shouldn't do it here again:
if self.stack.action == self.stack.CREATE:
yield self._break_if_required(
self.CREATE, environment.HOOK_PRE_CREATE)
LOG.info('creating %s', self)
# Re-resolve the template, since if the resource Ref's
# the StackId pseudo parameter, it will change after
# the parser.Stack is stored (which is after the resources
# are __init__'d, but before they are create()'d). We also
# do client lookups for RESOLVE translation rules here.
self.reparse()
self._update_stored_properties()
count = {self.CREATE: 0, self.DELETE: 0}
retry_limit = max(cfg.CONF.action_retry_limit, 0)
first_failure = None
while (count[self.CREATE] <= retry_limit and
count[self.DELETE] <= retry_limit):
pre_func = None
if count[action] > 0:
delay = timeutils.retry_backoff_delay(count[action],
jitter_max=2.0)
waiter = scheduler.TaskRunner(self.pause)
yield waiter.as_task(timeout=delay)
elif action == self.CREATE:
# Only validate properties in first create call.
pre_func = self.properties.validate
try:
yield self._do_action(action, pre_func)
if action == self.CREATE:
first_failure = None
break
else:
action = self.CREATE
except exception.ResourceFailure as failure:
if isinstance(failure.exc, exception.StackValidationFailed):
path = [self.t.name]
path.extend(failure.exc.path)
raise exception.ResourceFailure(
exception_or_error=exception.StackValidationFailed(
error=failure.exc.error,
path=path,
message=failure.exc.error_message
),
resource=failure.resource,
action=failure.action
)
if not isinstance(failure.exc, exception.ResourceInError):
raise failure
count[action] += 1
if action == self.CREATE:
action = self.DELETE
count[action] = 0
if first_failure is None:
# Save the first exception
first_failure = failure
if first_failure:
raise first_failure
if self.stack.action == self.stack.CREATE:
yield self._break_if_required(
self.CREATE, environment.HOOK_POST_CREATE)
@staticmethod
def pause():
try:
while True:
yield
except scheduler.Timeout:
return
def prepare_abandon(self):
self.abandon_in_progress = True
return {
'name': self.name,
'resource_id': self.resource_id,
'type': self.type(),
'action': self.action,
'status': self.status,
'metadata': self.metadata_get(),
'resource_data': self.data()
}
def adopt(self, resource_data):
"""Adopt the existing resource.
Resource subclasses can provide a handle_adopt() method to customise
adopt.
"""
self._update_stored_properties()
return self._do_action(self.ADOPT, resource_data=resource_data)
def handle_adopt(self, resource_data=None):
resource_id, data, metadata = self._get_resource_info(resource_data)
if not resource_id:
exc = Exception(_('Resource ID was not provided.'))
failure = exception.ResourceFailure(exc, self)
raise failure
# set resource id
self.resource_id_set(resource_id)
# save the resource data
if data and isinstance(data, dict):
for key, value in six.iteritems(data):
self.data_set(key, value)
# save the resource metadata
self.metadata_set(metadata)
def translation_rules(self, properties):
"""Return specified rules for resource."""
return []
def translate_properties(self, properties,
client_resolve=True):
"""Set resource specific rules for properties translation.
The properties parameter is a properties object and the
optional client_resolve flag is to specify whether to
do 'RESOLVE' translation with client lookup.
"""
rules = self.translation_rules(properties) or []
properties.update_translation(rules, client_resolve=client_resolve)
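    # cancel_grace_period(): if the plugin implements a
    # handle_<action>_cancel() hook, no grace period is needed (None is
    # returned); otherwise the configured cfg.CONF.error_wait_time is used.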
def cancel_grace_period(self):
canceller = getattr(self,
'handle_%s_cancel' % self.action.lower(),
None)
if callable(canceller):
return None
return cfg.CONF.error_wait_time
def _get_resource_info(self, resource_data):
if not resource_data:
return None, None, None
return (resource_data.get('resource_id'),
resource_data.get('resource_data'),
resource_data.get('metadata'))
def needs_replace(self, after_props):
"""Mandatory replace based on certain properties."""
return False
def needs_replace_with_prop_diff(self, changed_properties_set,
after_props, before_props):
"""Needs replace based on prop_diff."""
return False
def needs_replace_with_tmpl_diff(self, tmpl_diff):
"""Needs replace based on tmpl_diff."""
return False
def needs_replace_failed(self):
"""Needs replace if resource is in *_FAILED."""
return True
def _needs_update(self, after, before, after_props, before_props,
prev_resource, check_init_complete=True):
if self.status == self.FAILED:
# always replace when a resource is in CHECK_FAILED
if self.action == self.CHECK or self.needs_replace_failed():
raise UpdateReplace(self)
if self.state == (self.DELETE, self.COMPLETE):
raise UpdateReplace(self)
if (check_init_complete and
self.state == (self.INIT, self.COMPLETE)):
raise UpdateReplace(self)
if self.needs_replace(after_props):
raise UpdateReplace(self)
if before != after.freeze():
return True
try:
return before_props != after_props
except ValueError:
return True
def _check_for_convergence_replace(self, restricted_actions):
if 'replace' in restricted_actions:
ex = exception.ResourceActionRestricted(action='replace')
failure = exception.ResourceFailure(ex, self, self.UPDATE)
self._add_event(self.UPDATE, self.FAILED, six.text_type(ex))
raise failure
else:
raise UpdateReplace(self.name)
def update_convergence(self, template_id, resource_data, engine_id,
timeout, new_stack, progress_callback=None):
"""Update the resource synchronously.
        Persist the resource's current_template_id to template_id, and its
        requires to the list of required resource ids derived from the given
        resource_data and the existing resource's requires; then update the
        resource by invoking the scheduler TaskRunner.
"""
def update_templ_id_and_requires(persist=True):
self.current_template_id = template_id
self.requires = list(
set(data.primary_key for data in resource_data.values()
if data is not None)
)
if not persist:
return
self.store(lock=self.LOCK_RESPECT)
self._calling_engine_id = engine_id
# Check that the resource type matches. If the type has changed by a
# legitimate substitution, the load()ed resource will already be of
# the new type.
registry = new_stack.env.registry
new_res_def = new_stack.defn.resource_definition(self.name)
new_res_type = registry.get_class_to_instantiate(
new_res_def.resource_type, resource_name=self.name)
if type(self) is not new_res_type:
restrictions = registry.get_rsrc_restricted_actions(self.name)
self._check_for_convergence_replace(restrictions)
action_rollback = self.stack.action == self.stack.ROLLBACK
status_in_progress = self.stack.status == self.stack.IN_PROGRESS
if action_rollback and status_in_progress and self.replaced_by:
try:
self.restore_prev_rsrc(convergence=True)
except Exception as e:
failure = exception.ResourceFailure(e, self, self.action)
self.state_set(self.UPDATE, self.FAILED,
six.text_type(failure))
raise failure
runner = scheduler.TaskRunner(
self.update, new_res_def,
update_templ_func=update_templ_id_and_requires)
try:
runner(timeout=timeout, progress_callback=progress_callback)
except UpdateReplace:
raise
except exception.UpdateInProgress:
raise
except BaseException:
with excutils.save_and_reraise_exception():
update_templ_id_and_requires(persist=True)
def preview_update(self, after, before, after_props, before_props,
prev_resource, check_init_complete=False):
"""Simulates update without actually updating the resource.
        Raises UpdateReplace if replacement is required, or returns True
        if an in-place update is required.
"""
if self._needs_update(after, before, after_props, before_props,
prev_resource, check_init_complete):
tmpl_diff = self.update_template_diff(after.freeze(), before)
if tmpl_diff and self.needs_replace_with_tmpl_diff(tmpl_diff):
raise UpdateReplace(self)
self.update_template_diff_properties(after_props, before_props)
return True
else:
return False
def _check_restricted_actions(self, actions, after, before,
after_props, before_props,
prev_resource):
"""Checks for restricted actions.
        Raises ResourceActionRestricted if the resource requires update or
        replace and the required action is restricted.
        Otherwise, raises UpdateReplace if replacement is required, or
        returns True if an in-place update is required.
"""
try:
if self.preview_update(after, before, after_props, before_props,
prev_resource, check_init_complete=True):
if 'update' in actions:
raise exception.ResourceActionRestricted(action='update')
return True
except UpdateReplace:
if 'replace' in actions:
raise exception.ResourceActionRestricted(action='replace')
raise
return False
def _prepare_update_props(self, after, before):
before_props = before.properties(self.properties_schema,
self.context)
# Regenerate the schema, else validation would fail
self.regenerate_info_schema(after)
after.set_translation_rules(self.translation_rules(self.properties))
after_props = after.properties(self.properties_schema,
self.context)
self.translate_properties(after_props)
self.translate_properties(before_props)
if (cfg.CONF.observe_on_update or self.converge) and before_props:
if not self.resource_id:
raise UpdateReplace(self)
try:
resource_reality = self.get_live_state(before_props)
if resource_reality:
self._update_properties_with_live_state(before_props,
resource_reality)
except exception.EntityNotFound:
raise UpdateReplace(self)
except Exception as ex:
LOG.warning("Resource cannot be updated with it's "
"live state in case of next "
"error: %s", ex)
return after_props, before_props
def _prepare_update_replace_handler(self, action):
"""Return the handler method for preparing to replace a resource.
This may be either restore_prev_rsrc() (in the case of a legacy
rollback) or, more typically, prepare_for_replace().
If the plugin has not overridden the method, then None is returned in
place of the default method (which is empty anyway).
"""
if (self.stack.action == 'ROLLBACK' and
self.stack.status == 'IN_PROGRESS' and
not self.stack.convergence):
            # Handle the case when it's a rollback and we should restore
            # the old resource
if self.restore_prev_rsrc != Resource.restore_prev_rsrc:
return self.restore_prev_rsrc
else:
if self.prepare_for_replace != Resource.prepare_for_replace:
return self.prepare_for_replace
return None
def _prepare_update_replace(self, action):
handler = self._prepare_update_replace_handler(action)
if handler is None:
return
try:
handler()
except Exception as e:
            # If any exception happens, we should set the resource to
            # FAILED, then raise ResourceFailure
failure = exception.ResourceFailure(e, self, action)
self.state_set(action, self.FAILED, six.text_type(failure))
raise failure
@classmethod
def check_is_substituted(cls, new_res_type):
support_status = getattr(cls, 'support_status', None)
if support_status:
is_substituted = support_status.is_substituted(new_res_type)
return is_substituted
return False
@scheduler.wrappertask
def update(self, after, before=None, prev_resource=None,
update_templ_func=None):
"""Return a task to update the resource.
        Subclasses should provide a handle_update() method to customise
        update; the base-class handle_update will fail by default.
"""
action = self.UPDATE
assert isinstance(after, rsrc_defn.ResourceDefinition)
if before is None:
before = self.frozen_definition()
after_external_id = after.external_id()
if self.external_id != after_external_id:
msg = _("Update to property %(prop)s of %(name)s (%(res)s)"
) % {'prop': hot_tmpl.HOTemplate20161014.RES_EXTERNAL_ID,
'res': self.type(), 'name': self.name}
exc = exception.NotSupported(feature=msg)
raise exception.ResourceFailure(exc, self, action)
elif after_external_id is not None:
LOG.debug("Skip update on external resource.")
return
after_props, before_props = self._prepare_update_props(after, before)
yield self._break_if_required(
self.UPDATE, environment.HOOK_PRE_UPDATE)
try:
registry = self.stack.env.registry
restr_actions = registry.get_rsrc_restricted_actions(self.name)
if restr_actions:
needs_update = self._check_restricted_actions(restr_actions,
after, before,
after_props,
before_props,
prev_resource)
else:
needs_update = self._needs_update(after, before,
after_props, before_props,
prev_resource)
except UpdateReplace:
with excutils.save_and_reraise_exception():
if self._prepare_update_replace_handler(action) is not None:
with self.lock(self._calling_engine_id):
self._prepare_update_replace(action)
except exception.ResourceActionRestricted as ae:
failure = exception.ResourceFailure(ae, self, action)
self._add_event(action, self.FAILED, six.text_type(ae))
raise failure
if not needs_update:
if update_templ_func is not None:
update_templ_func(persist=True)
if self.status == self.FAILED:
                status_reason = _('Update status to COMPLETE for '
                                  'FAILED resource that requires neither '
                                  'update nor replace.')
lock = (self.LOCK_RESPECT if self.stack.convergence
else self.LOCK_NONE)
self.state_set(self.action, self.COMPLETE,
status_reason, lock=lock)
return
if not self.stack.convergence:
if (self.action, self.status) in (
(self.CREATE, self.IN_PROGRESS),
(self.UPDATE, self.IN_PROGRESS),
(self.ADOPT, self.IN_PROGRESS)):
exc = Exception(_('Resource update already requested'))
raise exception.ResourceFailure(exc, self, action)
LOG.info('updating %s', self)
self.updated_time = datetime.utcnow()
with self._action_recorder(action, UpdateReplace):
after_props.validate()
self.properties = before_props
tmpl_diff = self.update_template_diff(after.freeze(), before)
try:
if tmpl_diff and self.needs_replace_with_tmpl_diff(tmpl_diff):
raise UpdateReplace(self)
prop_diff = self.update_template_diff_properties(after_props,
before_props)
yield self.action_handler_task(action,
args=[after, tmpl_diff,
prop_diff])
except UpdateReplace:
with excutils.save_and_reraise_exception():
self._prepare_update_replace(action)
self.t = after
self.reparse()
self._update_stored_properties()
if update_templ_func is not None:
# template/requires will be persisted by _action_recorder()
update_templ_func(persist=False)
yield self._break_if_required(
self.UPDATE, environment.HOOK_POST_UPDATE)
def prepare_for_replace(self):
"""Prepare resource for replacing.
        Some resources require additional actions before being replaced.
        If a resource needs to be changed before replacement, this method
        should be implemented in the resource class.
"""
pass
def restore_prev_rsrc(self, convergence=False):
"""Restore resource after rollback.
        Some resources require additional actions after rollback.
        If a resource needs to be changed during rollback, this method
        should be implemented in the resource class.
"""
pass
def check(self):
"""Checks that the physical resource is in its expected state.
Gets the current status of the physical resource and updates the
database accordingly. If check is not supported by the resource,
default action is to fail and revert the resource's status to its
original state with the added message that check was not performed.
"""
action = self.CHECK
LOG.info('Checking %s', self)
if hasattr(self, 'handle_%s' % action.lower()):
if self.state == (self.INIT, self.COMPLETE):
reason = _('Can not check %s, resource not '
'created yet.') % self.name
self.state_set(action, self.FAILED, reason)
exc = Exception(_('Resource %s not created yet.') % self.name)
failure = exception.ResourceFailure(exc, self, action)
raise failure
with self.frozen_properties():
return self._do_action(action)
else:
reason = '%s not supported for %s' % (action, self.type())
self.state_set(action, self.COMPLETE, reason)
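    # _verify_check_conditions() expects an iterable of dicts of the form
    # {'attr': ..., 'expected': <value or list>, 'current': <value>} and
    # raises exception.Error listing every entry whose current value does
    # not match its expectation.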
def _verify_check_conditions(self, checks):
def valid(check):
if isinstance(check['expected'], list):
return check['current'] in check['expected']
else:
return check['current'] == check['expected']
msg = _("'%(attr)s': expected '%(expected)s', got '%(current)s'")
invalid_checks = [
msg % check
for check in checks
if not valid(check)
]
if invalid_checks:
raise exception.Error('; '.join(invalid_checks))
def suspend(self):
"""Return a task to suspend the resource.
Subclasses should provide a handle_suspend() method to implement
suspend.
"""
action = self.SUSPEND
# Don't try to suspend the resource unless it's in a stable state
# or if the previous suspend failed
if (self.action == self.DELETE or
(self.action != self.SUSPEND and
self.status != self.COMPLETE)):
exc = exception.Error(_('State %s invalid for suspend')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
LOG.info('suspending %s', self)
with self.frozen_properties():
return self._do_action(action)
def resume(self):
"""Return a task to resume the resource.
Subclasses should provide a handle_resume() method to implement resume.
"""
action = self.RESUME
        # Allow resuming a resource if it's SUSPEND_COMPLETE, RESUME_FAILED
        # or RESUME_COMPLETE. It is recommended to check the real state of
        # the physical resource in handle_resume()
if self.state not in ((self.SUSPEND, self.COMPLETE),
(self.RESUME, self.FAILED),
(self.RESUME, self.COMPLETE)):
exc = exception.Error(_('State %s invalid for resume')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
LOG.info('resuming %s', self)
with self.frozen_properties():
return self._do_action(action)
def snapshot(self):
"""Snapshot the resource and return the created data, if any."""
LOG.info('snapshotting %s', self)
with self.frozen_properties():
return self._do_action(self.SNAPSHOT)
@scheduler.wrappertask
def delete_snapshot(self, data):
yield self.action_handler_task('delete_snapshot', args=[data])
def physical_resource_name(self):
if self.id is None or self.action == self.INIT:
return None
name = '%s-%s-%s' % (self.stack.name.rstrip('*'),
self.name,
short_id.get_id(self.uuid))
if self.physical_resource_name_limit:
name = self.reduce_physical_resource_name(
name, self.physical_resource_name_limit)
return name
@staticmethod
def reduce_physical_resource_name(name, limit):
"""Reduce length of physical resource name to a limit.
The reduced name will consist of the following:
* the first 2 characters of the name
* a hyphen
* the end of the name, truncated on the left to bring
the name length within the limit
:param name: The name to reduce the length of
:param limit: The max length limit
:returns: A name whose length is less than or equal to the limit
"""
if len(name) <= limit:
return name
if limit < 4:
raise ValueError(_('limit cannot be less than 4'))
postfix_length = limit - 3
return name[0:2] + '-' + name[-postfix_length:]
def validate(self):
"""Validate the resource.
This may be overridden by resource plugins to add extra
validation logic specific to the resource implementation.
"""
LOG.info('Validating %s', self)
return self.validate_template()
def validate_template(self):
"""Validate structural/syntax aspects of the resource definition.
Resource plugins should not override this, because this interface
is expected to be called pre-create so things normally valid
in an overridden validate() such as accessing properties
may not work.
"""
self._validate_service_availability(
self.stack.context,
self.t.resource_type
)
try:
self.t.validate()
self.validate_deletion_policy(self.t.deletion_policy())
self.t.update_policy(self.update_policy_schema,
self.context).validate()
validate = self.properties.validate(
with_value=self.stack.strict_validate)
except exception.StackValidationFailed as ex:
path = [self.stack.t.RESOURCES, self.t.name]
if ex.path:
path.append(self.stack.t.get_section_name(ex.path[0]))
path.extend(ex.path[1:])
raise exception.StackValidationFailed(
error=ex.error,
path=path,
message=ex.error_message)
return validate
@classmethod
def validate_deletion_policy(cls, policy):
path = rsrc_defn.DELETION_POLICY
if policy not in rsrc_defn.ResourceDefinition.DELETION_POLICIES:
msg = _('Invalid deletion policy "%s"') % policy
raise exception.StackValidationFailed(message=msg, path=path)
if policy == rsrc_defn.ResourceDefinition.SNAPSHOT:
if not callable(getattr(cls, 'handle_snapshot_delete', None)):
msg = _('"%s" deletion policy not supported') % policy
raise exception.StackValidationFailed(message=msg, path=path)
def _update_replacement_data(self, template_id):
# Update the replacement resource's needed_by and replaces
# fields. Make sure that the replacement belongs to the given
# template and there is no engine working on it.
if self.replaced_by is None:
return
try:
db_res = resource_objects.Resource.get_obj(
self.context, self.replaced_by,
fields=('current_template_id', 'atomic_key'))
except exception.NotFound:
LOG.info("Could not find replacement of resource %(name)s "
"with id %(id)s while updating needed_by.",
{'name': self.name, 'id': self.replaced_by})
return
if (db_res.current_template_id == template_id):
# Following update failure is ignorable; another
# update might have locked/updated the resource.
if db_res.select_and_update(
{'needed_by': self.needed_by,
'replaces': None},
atomic_key=db_res.atomic_key,
expected_engine_id=None):
self._incr_atomic_key(self._atomic_key)
def delete_convergence(self, template_id, input_data, engine_id, timeout,
progress_callback=None):
"""Destroys the resource if it doesn't belong to given template.
The given template is suppose to be the current template being
provisioned.
Also, since this resource is visited as part of clean-up phase,
the needed_by should be updated. If this resource was
replaced by more recent resource, then delete this and update
the replacement resource's needed_by and replaces fields.
"""
self._calling_engine_id = engine_id
self.needed_by = list(set(v for v in input_data.values()
if v is not None))
if self.current_template_id != template_id:
# just delete the resources in INIT state
if self.action == self.INIT:
try:
resource_objects.Resource.delete(self.context, self.id)
except exception.NotFound:
pass
else:
runner = scheduler.TaskRunner(self.delete)
runner(timeout=timeout,
progress_callback=progress_callback)
self._update_replacement_data(template_id)
def handle_delete(self):
"""Default implementation; should be overridden by resources."""
if self.entity and self.resource_id is not None:
try:
obj = getattr(self.client(), self.entity)
obj.delete(self.resource_id)
except Exception as ex:
if self.default_client_name is not None:
self.client_plugin().ignore_not_found(ex)
return None
raise
return self.resource_id
@scheduler.wrappertask
def delete(self):
"""A task to delete the resource.
Subclasses should provide a handle_delete() method to customise
deletion.
"""
@excutils.exception_filter
def should_retry(exc):
if count >= retry_limit:
return False
if self.default_client_name:
return (self.client_plugin().is_conflict(exc) or
isinstance(exc, exception.PhysicalResourceExists))
return isinstance(exc, exception.PhysicalResourceExists)
action = self.DELETE
if (self.action, self.status) == (self.DELETE, self.COMPLETE):
return
# No need to delete if the resource has never been created
if self.action == self.INIT:
return
initial_state = self.state
# This method can be called when we replace a resource, too. In that
# case, a hook has already been dealt with in `Resource.update` so we
# shouldn't do it here again:
if self.stack.action == self.stack.DELETE:
yield self._break_if_required(
self.DELETE, environment.HOOK_PRE_DELETE)
LOG.info('deleting %s', self)
if self._stored_properties_data is not None:
# On delete we can't rely on re-resolving the properties
# so use the stored frozen_definition instead
self.properties = self.frozen_definition().properties(
self.properties_schema, self.context)
self.translate_properties(self.properties)
with self._action_recorder(action):
if self.abandon_in_progress:
deletion_policy = self.t.RETAIN
else:
deletion_policy = self.t.deletion_policy()
if deletion_policy != self.t.RETAIN:
if deletion_policy == self.t.SNAPSHOT:
action_args = [[initial_state], 'snapshot']
else:
action_args = []
count = -1
retry_limit = max(cfg.CONF.action_retry_limit, 0)
while True:
count += 1
                    LOG.info('delete %(name)s attempt %(attempt)d',
                             {'name': six.text_type(self),
                              'attempt': count + 1})
if count:
delay = timeutils.retry_backoff_delay(count,
jitter_max=2.0)
waiter = scheduler.TaskRunner(self.pause)
yield waiter.as_task(timeout=delay)
with excutils.exception_filter(should_retry):
yield self.action_handler_task(action,
*action_args)
break
if self.stack.action == self.stack.DELETE:
yield self._break_if_required(
self.DELETE, environment.HOOK_POST_DELETE)
@scheduler.wrappertask
def destroy(self):
"""A task to delete the resource and remove it from the database."""
yield self.delete()
if self.id is None:
return
try:
resource_objects.Resource.delete(self.context, self.id)
except exception.NotFound:
# Don't fail on delete if the db entry has
# not been created yet.
pass
self.id = None
def resource_id_set(self, inst):
self.resource_id = inst
if self.id is not None:
try:
resource_objects.Resource.update_by_id(
self.context,
self.id,
{'physical_resource_id': self.resource_id})
except Exception as ex:
LOG.warning('db error %s', ex)
def store(self, set_metadata=False, lock=LOCK_NONE):
"""Create the resource in the database.
        If self.id is set, we update the existing resource record instead.
"""
if not self.root_stack_id:
self.root_stack_id = self.stack.root_stack_id()
rs = {'action': self.action,
'status': self.status,
'status_reason': six.text_type(self.status_reason),
'stack_id': self.stack.id,
'physical_resource_id': self.resource_id,
'name': self.name,
'rsrc_prop_data_id':
self._create_or_replace_rsrc_prop_data(),
'needed_by': self.needed_by,
'requires': self.requires,
'replaces': self.replaces,
'replaced_by': self.replaced_by,
'current_template_id': self.current_template_id,
'root_stack_id': self.root_stack_id,
'updated_at': self.updated_time,
'properties_data': None}
if set_metadata:
metadata = self.t.metadata()
rs['rsrc_metadata'] = metadata
self._rsrc_metadata = metadata
if self.id is not None:
if (lock == self.LOCK_NONE or
(lock in {self.LOCK_ACQUIRE, self.LOCK_RELEASE} and
self._calling_engine_id is None)):
resource_objects.Resource.update_by_id(
self.context, self.id, rs)
if lock != self.LOCK_NONE:
LOG.error('No calling_engine_id in store() %s',
six.text_type(rs))
else:
self._store_with_lock(rs, lock)
else:
new_rs = resource_objects.Resource.create(self.context, rs)
self.id = new_rs.id
self.uuid = new_rs.uuid
self.created_time = new_rs.created_at
def _store_with_lock(self, rs, lock):
if lock == self.LOCK_ACQUIRE:
rs['engine_id'] = self._calling_engine_id
expected_engine_id = None
elif lock == self.LOCK_RESPECT:
expected_engine_id = None
elif lock == self.LOCK_RELEASE:
expected_engine_id = self._calling_engine_id
rs['engine_id'] = None
else:
assert False, "Invalid lock action: %s" % lock
if resource_objects.Resource.select_and_update_by_id(
self.context, self.id, rs, expected_engine_id,
self._atomic_key):
self._incr_atomic_key(self._atomic_key)
else:
LOG.info('Resource %s is locked or does not exist',
six.text_type(self))
LOG.debug('Resource id:%(resource_id)s locked or does not exist. '
'Expected atomic_key:%(atomic_key)s, '
'accessing from engine_id:%(engine_id)s',
{'resource_id': self.id,
'atomic_key': self._atomic_key,
'engine_id': self._calling_engine_id})
raise exception.UpdateInProgress(self.name)
def _add_event(self, action, status, reason):
"""Add a state change event to the database."""
physical_res_id = self.resource_id or self.physical_resource_name()
ev = event.Event(self.context, self.stack, action, status, reason,
physical_res_id, self._rsrc_prop_data_id,
self._stored_properties_data, self.name, self.type())
ev.store()
self.stack.dispatch_event(ev)
@contextlib.contextmanager
def lock(self, engine_id):
self._calling_engine_id = engine_id
try:
if engine_id is not None:
self._store_with_lock({}, self.LOCK_ACQUIRE)
yield
except exception.UpdateInProgress:
raise
except BaseException:
with excutils.save_and_reraise_exception():
if engine_id is not None:
self._store_with_lock({}, self.LOCK_RELEASE)
else:
if engine_id is not None:
self._store_with_lock({}, self.LOCK_RELEASE)
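    # Illustrative sketch (not part of the original source), assuming `rsrc`
    # is a Resource instance and `engine_id` identifies the calling engine:
    #
    #     with rsrc.lock(engine_id):
    #         ...  # mutate the resource while holding the DB-level lock
    #
    # Entering writes the engine id into the resource row (LOCK_ACQUIRE); the
    # lock is released on normal exit or on any error other than
    # UpdateInProgress, which signals that acquisition itself failed.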
def _resolve_any_attribute(self, attr):
"""Method for resolving any attribute, including base attributes.
        This method uses the basic _resolve_attribute() method to resolve
        plugin-specific attributes. Base attributes are resolved by the
        corresponding handler method, which should be defined in each resource
        class.
        :param attr: the name of the attribute to resolve
        :returns: the resolved attribute value
"""
if attr in self.base_attributes_schema:
# check resource_id, because usually it is required for getting
# information about resource
if not self.resource_id:
return None
try:
return getattr(self, '_{0}_resource'.format(attr))()
except Exception as ex:
if self.default_client_name is not None:
self.client_plugin().ignore_not_found(ex)
return None
raise
else:
try:
return self._resolve_attribute(attr)
except Exception as ex:
if self.default_client_name is not None:
self.client_plugin().ignore_not_found(ex)
return None
raise
def _show_resource(self):
"""Default implementation; should be overridden by resources.
:returns: the map of resource information or None
"""
if self.entity:
try:
obj = getattr(self.client(), self.entity)
resource = obj.get(self.resource_id)
if isinstance(resource, dict):
return resource
else:
return resource.to_dict()
except AttributeError as ex:
LOG.warning("Resolving 'show' attribute has failed : %s",
ex)
return None
def get_live_resource_data(self):
"""Default implementation; can be overridden by resources.
        Get the live resource data, translating client 'not found' errors
        into EntityNotFound.
"""
try:
resource_data = self._show_resource()
except Exception as ex:
if (self.default_client_name is not None and
self.client_plugin().is_not_found(ex)):
raise exception.EntityNotFound(
entity='Resource', name=self.name)
raise
return resource_data
def parse_live_resource_data(self, resource_properties, resource_data):
"""Default implementation; can be overridden by resources.
        Parse the live resource data so it can be used to update the stored
        properties with the live state.
:param resource_properties: properties of stored resource plugin.
:param resource_data: data from current live state of a resource.
"""
resource_result = {}
for key in self._update_allowed_properties:
if key in resource_data:
if key == 'name' and resource_properties.get(key) is None:
                    # Some resources fall back to `physical_resource_name` for
                    # the name property when no name is provided at create
                    # time, so don't take the name from resource_data if the
                    # stored property is None (it is most likely just such a
                    # fallback).
continue
resource_result[key] = resource_data.get(key)
return resource_result
def get_live_state(self, resource_properties):
"""Default implementation; should be overridden by resources.
        :param resource_properties: the resource's Properties object.
        :returns: dict of the resource's live property values.
"""
resource_data = self.get_live_resource_data()
if resource_data is None:
return {}
return self.parse_live_resource_data(resource_properties,
resource_data)
def _update_properties_with_live_state(self, resource_properties,
live_properties):
"""Update resource properties data with live state properties.
        Note that live_properties can contain None values: a property may be
        set to some value while the live state has no such property (i.e. it
        resolves to None), in which case the update should set the property
        to None as well.
"""
for key in resource_properties:
if key in live_properties:
if resource_properties.get(key) != live_properties.get(key):
resource_properties.data.update(
{key: live_properties.get(key)})
def _resolve_attribute(self, name):
"""Default implementation of resolving resource's attributes.
Should be overridden by resources, that expose attributes.
:param name: The attribute to resolve
        :returns: the resource attribute with the given name
"""
# By default, no attributes resolve
pass
def regenerate_info_schema(self, definition):
"""Default implementation; should be overridden by resources.
Should be overridden by resources that would require schema refresh
during update, ex. TemplateResource.
        :param definition: Resource Definition
"""
# By default, do not regenerate
pass
def state_reset(self):
"""Reset state to (INIT, COMPLETE)."""
self.action = self.INIT
self.status = self.COMPLETE
def state_set(self, action, status, reason="state changed",
lock=LOCK_NONE):
if action not in self.ACTIONS:
raise ValueError(_("Invalid action %s") % action)
if status not in self.STATUSES:
raise ValueError(_("Invalid status %s") % status)
old_state = (self.action, self.status)
new_state = (action, status)
set_metadata = self.action == self.INIT
self.action = action
self.status = status
self.status_reason = reason
self.store(set_metadata, lock=lock)
if new_state != old_state:
self._add_event(action, status, reason)
if status != self.COMPLETE:
self.clear_stored_attributes()
@property
def state(self):
"""Returns state, tuple of action, status."""
return (self.action, self.status)
def store_attributes(self):
assert self.id is not None
if self.status != self.COMPLETE or self.action in (self.INIT,
self.DELETE):
return
if not self.attributes.has_new_cached_attrs():
return
try:
attr_data_id = resource_objects.Resource.store_attributes(
self.context, self.id, self._atomic_key,
self.attributes.cached_attrs, self._attr_data_id)
if attr_data_id is not None:
self._attr_data_id = attr_data_id
except Exception as ex:
LOG.error('store_attributes rsrc %(name)s %(id)s DB error %(ex)s',
{'name': self.name, 'id': self.id, 'ex': ex})
def clear_stored_attributes(self):
if self._attr_data_id:
resource_objects.Resource.attr_data_delete(
self.context, self.id, self._attr_data_id)
self.attributes.reset_resolved_values()
def get_reference_id(self):
"""Default implementation for function get_resource.
This may be overridden by resource plugins to add extra
logic specific to the resource implementation.
"""
if self.resource_id is not None:
return six.text_type(self.resource_id)
else:
return six.text_type(self.name)
def FnGetRefId(self):
"""For the intrinsic function Ref.
        :returns: the id or name of the resource.
"""
return self.get_reference_id()
def physical_resource_name_or_FnGetRefId(self):
res_name = self.physical_resource_name()
if res_name is not None:
return six.text_type(res_name)
else:
return Resource.get_reference_id(self)
def get_attribute(self, key, *path):
"""Default implementation for function get_attr and Fn::GetAtt.
This may be overridden by resource plugins to add extra
logic specific to the resource implementation.
"""
try:
attribute = self.attributes[key]
except KeyError:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
return attributes.select_from_attribute(attribute, path)
def FnGetAtt(self, key, *path):
"""For the intrinsic function Fn::GetAtt.
:param key: the attribute key.
:param path: a list of path components to select from the attribute.
:returns: the attribute value.
"""
cache_custom = ((self.attributes.get_cache_mode(key) !=
attributes.Schema.CACHE_NONE) and
(type(self).get_attribute != Resource.get_attribute))
if cache_custom:
if path:
full_key = sync_point.str_pack_tuple((key,) + path)
else:
full_key = key
if full_key in self.attributes.cached_attrs:
return self.attributes.cached_attrs[full_key]
attr_val = self.get_attribute(key, *path)
if cache_custom:
self.attributes.set_cached_attr(full_key, attr_val)
return attr_val
def _signal_check_action(self):
if self.action in self.no_signal_actions:
self._add_event(self.action, self.status,
'Cannot signal resource during %s' % self.action)
msg = _('Signal resource during %s') % self.action
raise exception.NotSupported(feature=msg)
def _signal_check_hook(self, details):
if details and 'unset_hook' in details:
hook = details['unset_hook']
if not environment.valid_hook_type(hook):
msg = (_('Invalid hook type "%(hook)s" for %(resource)s') %
{'hook': hook, 'resource': six.text_type(self)})
raise exception.InvalidBreakPointHook(message=msg)
if not self.has_hook(hook):
msg = (_('The "%(hook)s" hook is not defined '
'on %(resource)s') %
{'hook': hook, 'resource': six.text_type(self)})
raise exception.InvalidBreakPointHook(message=msg)
def _unset_hook(self, details):
# Clear the hook without interfering with resources'
# `handle_signal` callbacks:
hook = details['unset_hook']
self.clear_hook(hook)
LOG.info('Clearing %(hook)s hook on %(resource)s',
{'hook': hook, 'resource': six.text_type(self)})
self._add_event(self.action, self.status,
"Hook %s is cleared" % hook)
def _handle_signal(self, details):
if not callable(getattr(self, 'handle_signal', None)):
raise exception.ResourceActionNotSupported(action='signal')
def get_string_details():
if details is None:
return 'No signal details provided'
if isinstance(details, six.string_types):
return details
if isinstance(details, dict):
if all(k in details for k in ('previous', 'current',
'reason')):
# this is from Ceilometer.
auto = '%(previous)s to %(current)s (%(reason)s)' % details
return 'alarm state changed from %s' % auto
return 'Unknown'
try:
signal_result = self.handle_signal(details)
if signal_result:
reason_string = "Signal: %s" % signal_result
else:
reason_string = get_string_details()
self._add_event('SIGNAL', self.status, reason_string)
except NoActionRequired:
# Don't log an event as it just spams the user.
pass
except Exception as ex:
if hasattr(self, '_db_res_is_deleted'):
# No spam required
return
LOG.info('signal %(name)s : %(msg)s',
{'name': six.text_type(self),
'msg': six.text_type(ex)},
exc_info=True)
failure = exception.ResourceFailure(ex, self)
raise failure
def signal(self, details=None, need_check=True):
"""Signal the resource.
Returns True if the metadata for all resources in the stack needs to
be regenerated as a result of the signal, False if it should not be.
Subclasses should provide a handle_signal() method to implement the
        signal. The base class raises an exception if no handler is implemented.
"""
if need_check:
self._signal_check_hook(details)
if details and 'unset_hook' in details:
self._unset_hook(details)
return False
if need_check:
self._signal_check_action()
with self.frozen_properties():
self._handle_signal(details)
return self.signal_needs_metadata_updates
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
raise UpdateReplace(self.name)
def metadata_update(self, new_metadata=None):
"""No-op for resources which don't explicitly override this method."""
if new_metadata:
LOG.warning("Resource %s does not implement metadata update",
self.name)
@classmethod
def resource_to_template(cls, resource_type, template_type='cfn'):
"""Generate a provider template that mirrors the resource.
:param resource_type: The resource type to be displayed in the template
:param template_type: the template type to generate, cfn or hot.
:returns: A template where the resource's properties_schema is mapped
as parameters, and the resource's attributes_schema is mapped as
outputs
"""
props_schema = {}
for name, schema_dict in cls.properties_schema.items():
schema = properties.Schema.from_legacy(schema_dict)
if schema.support_status.status != support.HIDDEN:
props_schema[name] = schema
params, props = (properties.Properties.
schema_to_parameters_and_properties(props_schema,
template_type))
resource_name = cls.__name__
outputs = attributes.Attributes.as_outputs(resource_name, cls,
template_type)
description = 'Initial template of %s' % resource_name
return cls.build_template_dict(resource_name, resource_type,
template_type, params, props,
outputs, description)
@staticmethod
def build_template_dict(res_name, res_type, tmpl_type,
params, props, outputs, description):
if tmpl_type == 'hot':
tmpl_dict = {
hot_tmpl.HOTemplate20161014.VERSION: '2016-10-14',
hot_tmpl.HOTemplate20161014.DESCRIPTION: description,
hot_tmpl.HOTemplate20161014.PARAMETERS: params,
hot_tmpl.HOTemplate20161014.OUTPUTS: outputs,
hot_tmpl.HOTemplate20161014.RESOURCES: {
res_name: {
hot_tmpl.HOTemplate20161014.RES_TYPE: res_type,
hot_tmpl.HOTemplate20161014.RES_PROPERTIES: props}}}
else:
tmpl_dict = {
cfn_tmpl.CfnTemplate.ALTERNATE_VERSION: '2012-12-12',
cfn_tmpl.CfnTemplate.DESCRIPTION: description,
cfn_tmpl.CfnTemplate.PARAMETERS: params,
cfn_tmpl.CfnTemplate.RESOURCES: {
res_name: {
cfn_tmpl.CfnTemplate.RES_TYPE: res_type,
cfn_tmpl.CfnTemplate.RES_PROPERTIES: props}
},
cfn_tmpl.CfnTemplate.OUTPUTS: outputs}
return tmpl_dict
def data(self):
"""Return the resource data for this resource.
Use methods data_set and data_delete to modify the resource data
for this resource.
:returns: a dict representing the resource data for this resource.
"""
if self._data is None and self.id is not None:
try:
self._data = resource_data_objects.ResourceData.get_all(self)
except exception.NotFound:
pass
return self._data or {}
def data_set(self, key, value, redact=False):
"""Set a key in the resource data."""
resource_data_objects.ResourceData.set(self, key, value, redact)
# force fetch all resource data from the database again
self._data = None
def data_delete(self, key):
"""Remove a key from the resource data.
:returns: True if the key existed to delete.
"""
try:
resource_data_objects.ResourceData.delete(self, key)
except exception.NotFound:
return False
else:
# force fetch all resource data from the database again
self._data = None
return True
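    # Illustrative sketch of the resource-data helpers above (not part of the
    # original source); the key name is made up:
    #
    #     self.data_set('credential_id', value, redact=True)
    #     self.data().get('credential_id')   # cache was cleared, so this
    #                                        # re-reads from the database
    #     self.data_delete('credential_id')  # True if the key existed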
def _create_or_replace_rsrc_prop_data(self):
if self._rsrc_prop_data_id is not None:
return self._rsrc_prop_data_id
if not self._stored_properties_data:
return None
self._rsrc_prop_data_id = \
rpd_objects.ResourcePropertiesData(self.context).create(
self.context, self._stored_properties_data).id
return self._rsrc_prop_data_id
def is_using_neutron(self):
try:
sess_client = self.client('neutron').httpclient
if not sess_client.get_endpoint():
return False
except Exception:
return False
return True
@staticmethod
def _make_resolver(ref):
"""Return an attribute resolution method.
This builds a resolver without a strong reference to this resource, to
break a possible cycle.
"""
def resolve(attr):
res = ref()
if res is None:
raise RuntimeError("Resource collected")
return res._resolve_any_attribute(attr)
return resolve
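    # Note (not part of the original source): `ref` is expected to be a weak
    # reference to the resource (hence the ref() call above), so the object
    # holding the resolver does not keep the Resource instance alive; once the
    # resource has been collected, resolving raises RuntimeError.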
|
py | 1a36674f27d454d7e134befa28499ce77b41fa6b | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict, List
import ray
import time
from ray.ray_constants import MEMORY_RESOURCE_UNIT_BYTES
class ClusterResources:
# TODO: make this configurable
refresh_interval = 0.1
latest_refresh_time = time.time() - refresh_interval
node_to_resources = {}
item_keys_mapping = {"num_cpus": "CPU"}
label_name = "__ray_spark_node_label"
@classmethod
def total_alive_nodes(cls):
cls._refresh()
return len(cls.node_to_resources)
@classmethod
def satisfy(cls, request: Dict[str, float]) -> List[str]:
cls._refresh()
satisfied = []
for host_name, resources in cls.node_to_resources.items():
if cls._compare_two_dict(resources, request):
satisfied.append(resources[cls.label_name])
return satisfied
@classmethod
def _refresh(cls):
if (time.time() - cls.latest_refresh_time) < cls.refresh_interval:
return
for node in ray.nodes():
if node["Alive"]:
host_name = node["NodeManagerHostname"]
resources = node["Resources"]
for key in resources:
if key.startswith("node:"):
resources[cls.label_name] = key
break
                assert cls.label_name in resources, \
                    f"{resources} should contain a resource like: 'node:10.0.0.131': 1.0"
cls.node_to_resources[host_name] = resources
cls.latest_refresh_time = time.time()
@classmethod
def _compare_two_dict(cls, available: Dict[str, float], request: Dict[str, float]) -> bool:
for k, v in request.items():
k = cls.item_keys_mapping.get(k, k)
if k not in available:
return False
if k == "memory":
v = int(v / MEMORY_RESOURCE_UNIT_BYTES)
if available[k] < v:
return False
return True
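# Illustrative sketch (not part of the original module): it exercises only the
# pure comparison helper above, so no running Ray cluster is needed; the
# resource numbers are made up.
def _compare_two_dict_example():
    available = {"CPU": 4.0, "memory": 100,
                 ClusterResources.label_name: "node:10.0.0.131"}
    # "num_cpus" is translated to Ray's "CPU" key and the requested memory is
    # converted into MEMORY_RESOURCE_UNIT_BYTES units before comparison.
    request = {"num_cpus": 2, "memory": 50 * MEMORY_RESOURCE_UNIT_BYTES}
    assert ClusterResources._compare_two_dict(available, request)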
|
py | 1a3667df421067da577ce4e746ebdef83e8dbfbc | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import collections
import json
import logging
import random
import re
import threading
from builtins import next
from builtins import object
from future.utils import itervalues
from google import protobuf
import apache_beam as beam
from apache_beam import coders
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders import coder_impl
from apache_beam.internal import pickler
from apache_beam.io import iobase
from apache_beam.metrics import monitoring_infos
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import common
from apache_beam.runners import pipeline_context
from apache_beam.runners.dataflow import dataflow_runner
from apache_beam.runners.worker import operation_specs
from apache_beam.runners.worker import operations
from apache_beam.runners.worker import statesampler
from apache_beam.transforms import sideinputs
from apache_beam.transforms import userstate
from apache_beam.utils import counters
from apache_beam.utils import proto_utils
from apache_beam.utils import timestamp
from apache_beam.utils import windowed_value
# This module is experimental. No backwards-compatibility guarantees.
DATA_INPUT_URN = 'urn:org.apache.beam:source:runner:0.1'
DATA_OUTPUT_URN = 'urn:org.apache.beam:sink:runner:0.1'
IDENTITY_DOFN_URN = 'urn:org.apache.beam:dofn:identity:0.1'
# TODO(vikasrk): Fix this once runner sends appropriate common_urns.
OLD_DATAFLOW_RUNNER_HARNESS_PARDO_URN = 'urn:beam:dofn:javasdk:0.1'
OLD_DATAFLOW_RUNNER_HARNESS_READ_URN = 'urn:org.apache.beam:source:java:0.1'
class RunnerIOOperation(operations.Operation):
"""Common baseclass for runner harness IO operations."""
def __init__(self, name_context, step_name, consumers, counter_factory,
state_sampler, windowed_coder, target, data_channel):
super(RunnerIOOperation, self).__init__(
name_context, None, counter_factory, state_sampler)
self.windowed_coder = windowed_coder
self.windowed_coder_impl = windowed_coder.get_impl()
# target represents the consumer for the bytes in the data plane for a
# DataInputOperation or a producer of these bytes for a DataOutputOperation.
self.target = target
self.data_channel = data_channel
for _, consumer_ops in consumers.items():
for consumer in consumer_ops:
self.add_receiver(consumer, 0)
class DataOutputOperation(RunnerIOOperation):
"""A sink-like operation that gathers outputs to be sent back to the runner.
"""
def set_output_stream(self, output_stream):
self.output_stream = output_stream
def process(self, windowed_value):
self.windowed_coder_impl.encode_to_stream(
windowed_value, self.output_stream, True)
self.output_stream.maybe_flush()
def finish(self):
self.output_stream.close()
super(DataOutputOperation, self).finish()
class DataInputOperation(RunnerIOOperation):
"""A source-like operation that gathers input from the runner.
"""
def __init__(self, operation_name, step_name, consumers, counter_factory,
state_sampler, windowed_coder, input_target, data_channel):
super(DataInputOperation, self).__init__(
operation_name, step_name, consumers, counter_factory, state_sampler,
windowed_coder, target=input_target, data_channel=data_channel)
# We must do this manually as we don't have a spec or spec.output_coders.
self.receivers = [
operations.ConsumerSet.create(
self.counter_factory, self.name_context.step_name, 0,
next(iter(itervalues(consumers))), self.windowed_coder)]
self.splitting_lock = threading.Lock()
def start(self):
super(DataInputOperation, self).start()
self.index = -1
self.stop = float('inf')
def process(self, windowed_value):
self.output(windowed_value)
def process_encoded(self, encoded_windowed_values):
input_stream = coder_impl.create_InputStream(encoded_windowed_values)
while input_stream.size() > 0:
with self.splitting_lock:
if self.index == self.stop - 1:
return
self.index += 1
decoded_value = self.windowed_coder_impl.decode_from_stream(
input_stream, True)
self.output(decoded_value)
def try_split(self, fraction_of_remainder, total_buffer_size):
with self.splitting_lock:
if total_buffer_size < self.index + 1:
total_buffer_size = self.index + 1
elif self.stop and total_buffer_size > self.stop:
total_buffer_size = self.stop
if self.index == -1:
# We are "finished" with the (non-existent) previous element.
current_element_progress = 1
else:
current_element_progress_object = (
self.receivers[0].current_element_progress())
if current_element_progress_object is None:
current_element_progress = 0.5
else:
current_element_progress = (
current_element_progress_object.fraction_completed)
# Now figure out where to split.
# The units here (except for keep_of_element_remainder) are all in
# terms of number of (possibly fractional) elements.
remainder = total_buffer_size - self.index - current_element_progress
keep = remainder * fraction_of_remainder
if current_element_progress < 1:
keep_of_element_remainder = keep / (1 - current_element_progress)
# If it's less than what's left of the current element,
# try splitting at the current element.
if keep_of_element_remainder < 1:
split = self.receivers[0].try_split(keep_of_element_remainder)
if split:
element_primary, element_residual = split
self.stop = self.index + 1
return self.index - 1, element_primary, element_residual, self.stop
# Otherwise, split at the closest element boundary.
# pylint: disable=round-builtin
stop_index = (
self.index + max(1, int(round(current_element_progress + keep))))
if stop_index < self.stop:
self.stop = stop_index
return self.stop - 1, None, None, self.stop
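  # Worked example of the arithmetic above (not part of the original module):
  # with index=3, the current element half processed (progress 0.5),
  # total_buffer_size=10 and fraction_of_remainder=0.5:
  #   remainder = 10 - 3 - 0.5 = 6.5 and keep = 3.25;
  #   keep_of_element_remainder = 3.25 / (1 - 0.5) = 6.5 >= 1, so no
  #   sub-element split is attempted and the split falls on an element
  #   boundary: stop_index = 3 + max(1, round(0.5 + 3.25)) = 7.
  # The method returns (6, None, None, 7): elements 0-6 remain in the primary
  # and elements 7-9 become the residual.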
class _StateBackedIterable(object):
def __init__(self, state_handler, state_key, coder_or_impl):
self._state_handler = state_handler
self._state_key = state_key
if isinstance(coder_or_impl, coders.Coder):
self._coder_impl = coder_or_impl.get_impl()
else:
self._coder_impl = coder_or_impl
def __iter__(self):
data, continuation_token = self._state_handler.blocking_get(self._state_key)
while True:
input_stream = coder_impl.create_InputStream(data)
while input_stream.size() > 0:
yield self._coder_impl.decode_from_stream(input_stream, True)
if not continuation_token:
break
else:
data, continuation_token = self._state_handler.blocking_get(
self._state_key, continuation_token)
def __reduce__(self):
return list, (list(self),)
coder_impl.FastPrimitivesCoderImpl.register_iterable_like_type(
_StateBackedIterable)
class StateBackedSideInputMap(object):
def __init__(self, state_handler, transform_id, tag, side_input_data, coder):
self._state_handler = state_handler
self._transform_id = transform_id
self._tag = tag
self._side_input_data = side_input_data
self._element_coder = coder.wrapped_value_coder
self._target_window_coder = coder.window_coder
# TODO(robertwb): Limit the cache size.
self._cache = {}
def __getitem__(self, window):
target_window = self._side_input_data.window_mapping_fn(window)
if target_window not in self._cache:
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=self._transform_id,
side_input_id=self._tag,
window=self._target_window_coder.encode(target_window),
key=b''))
state_handler = self._state_handler
access_pattern = self._side_input_data.access_pattern
if access_pattern == common_urns.side_inputs.ITERABLE.urn:
raw_view = _StateBackedIterable(
state_handler, state_key, self._element_coder)
elif (access_pattern == common_urns.side_inputs.MULTIMAP.urn or
access_pattern ==
dataflow_runner._DataflowSideInput.DATAFLOW_MULTIMAP_URN):
cache = {}
key_coder_impl = self._element_coder.key_coder().get_impl()
value_coder = self._element_coder.value_coder()
class MultiMap(object):
def __getitem__(self, key):
if key not in cache:
keyed_state_key = beam_fn_api_pb2.StateKey()
keyed_state_key.CopyFrom(state_key)
keyed_state_key.multimap_side_input.key = (
key_coder_impl.encode_nested(key))
cache[key] = _StateBackedIterable(
state_handler, keyed_state_key, value_coder)
return cache[key]
def __reduce__(self):
# TODO(robertwb): Figure out how to support this.
raise TypeError(common_urns.side_inputs.MULTIMAP.urn)
raw_view = MultiMap()
else:
raise ValueError(
"Unknown access pattern: '%s'" % access_pattern)
self._cache[target_window] = self._side_input_data.view_fn(raw_view)
return self._cache[target_window]
def is_globally_windowed(self):
return (self._side_input_data.window_mapping_fn
== sideinputs._global_window_mapping_fn)
def reset(self):
# TODO(BEAM-5428): Cross-bundle caching respecting cache tokens.
self._cache = {}
class CombiningValueRuntimeState(userstate.RuntimeState):
def __init__(self, underlying_bag_state, combinefn):
self._combinefn = combinefn
self._underlying_bag_state = underlying_bag_state
def _read_accumulator(self, rewrite=True):
merged_accumulator = self._combinefn.merge_accumulators(
self._underlying_bag_state.read())
if rewrite:
self._underlying_bag_state.clear()
self._underlying_bag_state.add(merged_accumulator)
return merged_accumulator
def read(self):
return self._combinefn.extract_output(self._read_accumulator())
def add(self, value):
# Prefer blind writes, but don't let them grow unboundedly.
# This should be tuned to be much lower, but for now exercise
# both paths well.
if random.random() < 0.5:
accumulator = self._read_accumulator(False)
self._underlying_bag_state.clear()
else:
accumulator = self._combinefn.create_accumulator()
self._underlying_bag_state.add(
self._combinefn.add_input(accumulator, value))
def clear(self):
self._underlying_bag_state.clear()
def _commit(self):
self._underlying_bag_state._commit()
class _ConcatIterable(object):
  """An iterable that is the concatenation of two iterables.
Unlike itertools.chain, this allows reiteration.
"""
def __init__(self, first, second):
self.first = first
self.second = second
def __iter__(self):
for elem in self.first:
yield elem
for elem in self.second:
yield elem
coder_impl.FastPrimitivesCoderImpl.register_iterable_like_type(_ConcatIterable)
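# Illustrative sketch (not part of the original module): unlike
# itertools.chain, _ConcatIterable can be iterated more than once, which
# matters when user code re-reads bag state within a bundle.
def _concat_iterable_example():
  combined = _ConcatIterable([1, 2], [3, 4])
  assert list(combined) == [1, 2, 3, 4]
  # A second pass still sees every element; an exhausted chain would not.
  assert list(combined) == [1, 2, 3, 4]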
# TODO(BEAM-5428): Implement cross-bundle state caching.
class SynchronousBagRuntimeState(userstate.RuntimeState):
def __init__(self, state_handler, state_key, value_coder):
self._state_handler = state_handler
self._state_key = state_key
self._value_coder = value_coder
self._cleared = False
self._added_elements = []
def read(self):
return _ConcatIterable(
[] if self._cleared else _StateBackedIterable(
self._state_handler, self._state_key, self._value_coder),
self._added_elements)
def add(self, value):
self._added_elements.append(value)
def clear(self):
self._cleared = True
self._added_elements = []
def _commit(self):
if self._cleared:
self._state_handler.blocking_clear(self._state_key)
if self._added_elements:
value_coder_impl = self._value_coder.get_impl()
out = coder_impl.create_OutputStream()
for element in self._added_elements:
value_coder_impl.encode_to_stream(element, out, True)
self._state_handler.blocking_append(self._state_key, out.get())
class OutputTimer(object):
def __init__(self, key, window, receiver):
self._key = key
self._window = window
self._receiver = receiver
def set(self, ts):
ts = timestamp.Timestamp.of(ts)
self._receiver.receive(
windowed_value.WindowedValue(
(self._key, dict(timestamp=ts)), ts, (self._window,)))
def clear(self, timestamp):
self._receiver.receive((self._key, dict(clear=True)))
class FnApiUserStateContext(userstate.UserStateContext):
def __init__(
self, state_handler, transform_id, key_coder, window_coder, timer_specs):
self._state_handler = state_handler
self._transform_id = transform_id
self._key_coder = key_coder
self._window_coder = window_coder
self._timer_specs = timer_specs
self._timer_receivers = None
self._all_states = {}
def update_timer_receivers(self, receivers):
self._timer_receivers = {}
for tag in self._timer_specs:
self._timer_receivers[tag] = receivers.pop(tag)
def get_timer(self, timer_spec, key, window):
return OutputTimer(
key, window, self._timer_receivers[timer_spec.name])
def get_state(self, *args):
state_handle = self._all_states.get(args)
if state_handle is None:
state_handle = self._all_states[args] = self._create_state(*args)
return state_handle
def _create_state(self, state_spec, key, window):
if isinstance(state_spec,
(userstate.BagStateSpec, userstate.CombiningValueStateSpec)):
bag_state = SynchronousBagRuntimeState(
self._state_handler,
state_key=beam_fn_api_pb2.StateKey(
bag_user_state=beam_fn_api_pb2.StateKey.BagUserState(
ptransform_id=self._transform_id,
user_state_id=state_spec.name,
window=self._window_coder.encode(window),
key=self._key_coder.encode(key))),
value_coder=state_spec.coder)
if isinstance(state_spec, userstate.BagStateSpec):
return bag_state
else:
return CombiningValueRuntimeState(bag_state, state_spec.combine_fn)
else:
raise NotImplementedError(state_spec)
def commit(self):
for state in self._all_states.values():
state._commit()
def reset(self):
# TODO(BEAM-5428): Implement cross-bundle state caching.
self._all_states = {}
def memoize(func):
cache = {}
missing = object()
def wrapper(*args):
result = cache.get(args, missing)
if result is missing:
result = cache[args] = func(*args)
return result
return wrapper
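# Illustrative sketch (not part of the original module): memoize() caches
# results keyed on the positional-argument tuple, which is what lets
# get_operation() and topological_height() below recurse over the transform
# graph without recomputing shared nodes.
def _memoize_example():
  calls = []
  @memoize
  def square(x):
    calls.append(x)
    return x * x
  assert square(3) == 9
  assert square(3) == 9
  assert calls == [3]  # the second call was answered from the cache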
def only_element(iterable):
element, = iterable
return element
class BundleProcessor(object):
"""A class for processing bundles of elements."""
def __init__(
self, process_bundle_descriptor, state_handler, data_channel_factory):
self.process_bundle_descriptor = process_bundle_descriptor
self.state_handler = state_handler
self.data_channel_factory = data_channel_factory
# TODO(robertwb): Figure out the correct prefix to use for output counters
# from StateSampler.
self.counter_factory = counters.CounterFactory()
self.state_sampler = statesampler.StateSampler(
'fnapi-step-%s' % self.process_bundle_descriptor.id,
self.counter_factory)
self.ops = self.create_execution_tree(self.process_bundle_descriptor)
for op in self.ops.values():
op.setup()
self.splitting_lock = threading.Lock()
def create_execution_tree(self, descriptor):
transform_factory = BeamTransformFactory(
descriptor, self.data_channel_factory, self.counter_factory,
self.state_sampler, self.state_handler)
def is_side_input(transform_proto, tag):
if transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn:
return tag in proto_utils.parse_Bytes(
transform_proto.spec.payload,
beam_runner_api_pb2.ParDoPayload).side_inputs
pcoll_consumers = collections.defaultdict(list)
for transform_id, transform_proto in descriptor.transforms.items():
for tag, pcoll_id in transform_proto.inputs.items():
if not is_side_input(transform_proto, tag):
pcoll_consumers[pcoll_id].append(transform_id)
@memoize
def get_operation(transform_id):
transform_consumers = {
tag: [get_operation(op) for op in pcoll_consumers[pcoll_id]]
for tag, pcoll_id
in descriptor.transforms[transform_id].outputs.items()
}
return transform_factory.create_operation(
transform_id, transform_consumers)
# Operations must be started (hence returned) in order.
@memoize
def topological_height(transform_id):
return 1 + max(
[0] +
[topological_height(consumer)
for pcoll in descriptor.transforms[transform_id].outputs.values()
for consumer in pcoll_consumers[pcoll]])
return collections.OrderedDict([
(transform_id, get_operation(transform_id))
for transform_id in sorted(
descriptor.transforms, key=topological_height, reverse=True)])
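  # For example (not part of the original module), in a Read -> ParDo -> Write
  # graph the heights are Write=1, ParDo=2, Read=3, so self.ops is ordered
  # Read, ParDo, Write; process_bundle() below starts operations in reverse
  # (consumers first) and finishes them in forward order (producers first).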
def reset(self):
self.counter_factory.reset()
self.state_sampler.reset()
# Side input caches.
for op in self.ops.values():
op.reset()
def process_bundle(self, instruction_id):
expected_inputs = []
for op in self.ops.values():
if isinstance(op, DataOutputOperation):
# TODO(robertwb): Is there a better way to pass the instruction id to
# the operation?
op.set_output_stream(op.data_channel.output_stream(
instruction_id, op.target))
elif isinstance(op, DataInputOperation):
# We must wait until we receive "end of stream" for each of these ops.
expected_inputs.append(op)
try:
execution_context = ExecutionContext()
self.state_sampler.start()
# Start all operations.
for op in reversed(self.ops.values()):
logging.debug('start %s', op)
op.execution_context = execution_context
op.start()
# Inject inputs from data plane.
data_channels = collections.defaultdict(list)
input_op_by_target = {}
for input_op in expected_inputs:
data_channels[input_op.data_channel].append(input_op.target)
# ignores input name
input_op_by_target[
input_op.target.primitive_transform_reference] = input_op
for data_channel, expected_targets in data_channels.items():
for data in data_channel.input_elements(
instruction_id, expected_targets):
input_op_by_target[
data.target.primitive_transform_reference
].process_encoded(data.data)
# Finish all operations.
for op in self.ops.values():
logging.debug('finish %s', op)
op.finish()
return ([self.delayed_bundle_application(op, residual)
for op, residual in execution_context.delayed_applications],
self.requires_finalization())
finally:
# Ensure any in-flight split attempts complete.
with self.splitting_lock:
pass
self.state_sampler.stop_if_still_running()
def finalize_bundle(self):
for op in self.ops.values():
op.finalize_bundle()
return beam_fn_api_pb2.FinalizeBundleResponse()
def requires_finalization(self):
return any(op.needs_finalization() for op in self.ops.values())
def try_split(self, bundle_split_request):
split_response = beam_fn_api_pb2.ProcessBundleSplitResponse()
with self.splitting_lock:
for op in self.ops.values():
if isinstance(op, DataInputOperation):
desired_split = bundle_split_request.desired_splits.get(
op.target.primitive_transform_reference)
if desired_split:
split = op.try_split(desired_split.fraction_of_remainder,
desired_split.estimated_input_elements)
if split:
(primary_end, element_primary, element_residual, residual_start,
) = split
if element_primary:
split_response.primary_roots.add().CopyFrom(
self.delayed_bundle_application(
*element_primary).application)
if element_residual:
split_response.residual_roots.add().CopyFrom(
self.delayed_bundle_application(*element_residual))
split_response.channel_splits.extend([
beam_fn_api_pb2.ProcessBundleSplitResponse.ChannelSplit(
ptransform_id=op.target.primitive_transform_reference,
input_id=op.target.name,
last_primary_element=primary_end,
first_residual_element=residual_start)])
return split_response
def delayed_bundle_application(self, op, deferred_remainder):
ptransform_id, main_input_tag, main_input_coder, outputs = op.input_info
# TODO(SDF): For non-root nodes, need main_input_coder + residual_coder.
element_and_restriction, watermark = deferred_remainder
if watermark:
proto_watermark = protobuf.Timestamp()
proto_watermark.FromMicroseconds(watermark.micros)
output_watermarks = {output: proto_watermark for output in outputs}
else:
output_watermarks = None
return beam_fn_api_pb2.DelayedBundleApplication(
application=beam_fn_api_pb2.BundleApplication(
ptransform_id=ptransform_id,
input_id=main_input_tag,
output_watermarks=output_watermarks,
element=main_input_coder.get_impl().encode_nested(
element_and_restriction)))
def metrics(self):
# DEPRECATED
return beam_fn_api_pb2.Metrics(
# TODO(robertwb): Rename to progress?
ptransforms={
transform_id:
self._fix_output_tags(transform_id, op.progress_metrics())
for transform_id, op in self.ops.items()})
def _fix_output_tags(self, transform_id, metrics):
# DEPRECATED
actual_output_tags = list(
self.process_bundle_descriptor.transforms[transform_id].outputs.keys())
# Outputs are still referred to by index, not by name, in many Operations.
# However, if there is exactly one output, we can fix up the name here.
def fix_only_output_tag(actual_output_tag, mapping):
if len(mapping) == 1:
fake_output_tag, count = only_element(list(mapping.items()))
if fake_output_tag != actual_output_tag:
del mapping[fake_output_tag]
mapping[actual_output_tag] = count
if len(actual_output_tags) == 1:
fix_only_output_tag(
actual_output_tags[0],
metrics.processed_elements.measured.output_element_counts)
fix_only_output_tag(
actual_output_tags[0],
metrics.active_elements.measured.output_element_counts)
return metrics
  def monitoring_infos(self):
    """Return the MonitoringInfos collected while processing this bundle."""
    # Construct a new dict first to remove duplicates.
all_monitoring_infos_dict = {}
for transform_id, op in self.ops.items():
for mi in op.monitoring_infos(transform_id).values():
fixed_mi = self._fix_output_tags_monitoring_info(transform_id, mi)
all_monitoring_infos_dict[monitoring_infos.to_key(fixed_mi)] = fixed_mi
infos_list = list(all_monitoring_infos_dict.values())
def inject_pcollection_into_element_count(monitoring_info):
"""
      If the provided metric is an element count metric, find the relevant
      transform output in the current process_bundle_descriptor and add a
      PCOLLECTION_LABEL with the pcollection id to the monitoring info,
      dropping the PTRANSFORM and TAG labels.
"""
if monitoring_info.urn == monitoring_infos.ELEMENT_COUNT_URN:
if not monitoring_infos.PTRANSFORM_LABEL in monitoring_info.labels:
return
ptransform_label = monitoring_info.labels[
monitoring_infos.PTRANSFORM_LABEL]
if not monitoring_infos.TAG_LABEL in monitoring_info.labels:
return
tag_label = monitoring_info.labels[monitoring_infos.TAG_LABEL]
if not ptransform_label in self.process_bundle_descriptor.transforms:
return
if not tag_label in self.process_bundle_descriptor.transforms[
ptransform_label].outputs:
return
pcollection_name = (self.process_bundle_descriptor
.transforms[ptransform_label].outputs[tag_label])
monitoring_info.labels[
monitoring_infos.PCOLLECTION_LABEL] = pcollection_name
# Cleaning up labels that are not in specification.
monitoring_info.labels.pop(monitoring_infos.PTRANSFORM_LABEL)
monitoring_info.labels.pop(monitoring_infos.TAG_LABEL)
for mi in infos_list:
inject_pcollection_into_element_count(mi)
return infos_list
def _fix_output_tags_monitoring_info(self, transform_id, monitoring_info):
actual_output_tags = list(
self.process_bundle_descriptor.transforms[transform_id].outputs.keys())
if ('TAG' in monitoring_info.labels and
monitoring_info.labels['TAG'] == 'ONLY_OUTPUT'):
if len(actual_output_tags) == 1:
monitoring_info.labels['TAG'] = actual_output_tags[0]
return monitoring_info
class ExecutionContext(object):
def __init__(self):
self.delayed_applications = []
class BeamTransformFactory(object):
"""Factory for turning transform_protos into executable operations."""
def __init__(self, descriptor, data_channel_factory, counter_factory,
state_sampler, state_handler):
self.descriptor = descriptor
self.data_channel_factory = data_channel_factory
self.counter_factory = counter_factory
self.state_sampler = state_sampler
self.state_handler = state_handler
self.context = pipeline_context.PipelineContext(
descriptor,
iterable_state_read=lambda token, element_coder_impl:
_StateBackedIterable(
state_handler,
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
element_coder_impl))
_known_urns = {}
@classmethod
def register_urn(cls, urn, parameter_type):
def wrapper(func):
cls._known_urns[urn] = func, parameter_type
return func
return wrapper
def create_operation(self, transform_id, consumers):
transform_proto = self.descriptor.transforms[transform_id]
if not transform_proto.unique_name:
      logging.warning("No unique name set for transform %s", transform_id)
transform_proto.unique_name = transform_id
creator, parameter_type = self._known_urns[transform_proto.spec.urn]
payload = proto_utils.parse_Bytes(
transform_proto.spec.payload, parameter_type)
return creator(self, transform_id, transform_proto, payload, consumers)
def get_coder(self, coder_id):
if coder_id not in self.descriptor.coders:
raise KeyError("No such coder: %s" % coder_id)
coder_proto = self.descriptor.coders[coder_id]
if coder_proto.spec.spec.urn:
return self.context.coders.get_by_id(coder_id)
else:
# No URN, assume cloud object encoding json bytes.
return operation_specs.get_coder_from_spec(
json.loads(coder_proto.spec.spec.payload.decode('utf-8')))
def get_windowed_coder(self, pcoll_id):
coder = self.get_coder(self.descriptor.pcollections[pcoll_id].coder_id)
# TODO(robertwb): Remove this condition once all runners are consistent.
if not isinstance(coder, WindowedValueCoder):
windowing_strategy = self.descriptor.windowing_strategies[
self.descriptor.pcollections[pcoll_id].windowing_strategy_id]
return WindowedValueCoder(
coder, self.get_coder(windowing_strategy.window_coder_id))
else:
return coder
def get_output_coders(self, transform_proto):
return {
tag: self.get_windowed_coder(pcoll_id)
for tag, pcoll_id in transform_proto.outputs.items()
}
def get_only_output_coder(self, transform_proto):
return only_element(self.get_output_coders(transform_proto).values())
def get_input_coders(self, transform_proto):
return {
tag: self.get_windowed_coder(pcoll_id)
for tag, pcoll_id in transform_proto.inputs.items()
}
def get_only_input_coder(self, transform_proto):
return only_element(list(self.get_input_coders(transform_proto).values()))
# TODO(robertwb): Update all operations to take these in the constructor.
@staticmethod
def augment_oldstyle_op(op, step_name, consumers, tag_list=None):
op.step_name = step_name
for tag, op_consumers in consumers.items():
for consumer in op_consumers:
op.add_receiver(consumer, tag_list.index(tag) if tag_list else 0)
return op
class TimerConsumer(operations.Operation):
def __init__(self, timer_tag, do_op):
self._timer_tag = timer_tag
self._do_op = do_op
def process(self, windowed_value):
self._do_op.process_timer(self._timer_tag, windowed_value)
@BeamTransformFactory.register_urn(
DATA_INPUT_URN, beam_fn_api_pb2.RemoteGrpcPort)
def create(factory, transform_id, transform_proto, grpc_port, consumers):
# Timers are the one special case where we don't want to call the
# (unlabeled) operation.process() method, which we detect here.
# TODO(robertwb): Consider generalizing if there are any more cases.
output_pcoll = only_element(transform_proto.outputs.values())
output_consumers = only_element(consumers.values())
if (len(output_consumers) == 1
and isinstance(only_element(output_consumers), operations.DoOperation)):
do_op = only_element(output_consumers)
for tag, pcoll_id in do_op.timer_inputs.items():
if pcoll_id == output_pcoll:
output_consumers[:] = [TimerConsumer(tag, do_op)]
break
target = beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id,
name=only_element(list(transform_proto.outputs.keys())))
if grpc_port.coder_id:
output_coder = factory.get_coder(grpc_port.coder_id)
else:
logging.error(
'Missing required coder_id on grpc_port for %s; '
'using deprecated fallback.',
transform_id)
output_coder = factory.get_only_output_coder(transform_proto)
return DataInputOperation(
transform_proto.unique_name,
transform_proto.unique_name,
consumers,
factory.counter_factory,
factory.state_sampler,
output_coder,
input_target=target,
data_channel=factory.data_channel_factory.create_data_channel(grpc_port))
@BeamTransformFactory.register_urn(
DATA_OUTPUT_URN, beam_fn_api_pb2.RemoteGrpcPort)
def create(factory, transform_id, transform_proto, grpc_port, consumers):
target = beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id,
name=only_element(list(transform_proto.inputs.keys())))
if grpc_port.coder_id:
output_coder = factory.get_coder(grpc_port.coder_id)
else:
logging.error(
'Missing required coder_id on grpc_port for %s; '
'using deprecated fallback.',
transform_id)
output_coder = factory.get_only_input_coder(transform_proto)
return DataOutputOperation(
transform_proto.unique_name,
transform_proto.unique_name,
consumers,
factory.counter_factory,
factory.state_sampler,
output_coder,
target=target,
data_channel=factory.data_channel_factory.create_data_channel(grpc_port))
@BeamTransformFactory.register_urn(OLD_DATAFLOW_RUNNER_HARNESS_READ_URN, None)
def create(factory, transform_id, transform_proto, parameter, consumers):
# The Dataflow runner harness strips the base64 encoding.
source = pickler.loads(base64.b64encode(parameter))
spec = operation_specs.WorkerRead(
iobase.SourceBundle(1.0, source, None, None),
[factory.get_only_output_coder(transform_proto)])
return factory.augment_oldstyle_op(
operations.ReadOperation(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.deprecated_primitives.READ.urn, beam_runner_api_pb2.ReadPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
source = iobase.SourceBase.from_runner_api(parameter.source, factory.context)
spec = operation_specs.WorkerRead(
iobase.SourceBundle(1.0, source, None, None),
[WindowedValueCoder(source.default_output_coder())])
return factory.augment_oldstyle_op(
operations.ReadOperation(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
python_urns.IMPULSE_READ_TRANSFORM, beam_runner_api_pb2.ReadPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
return operations.ImpulseReadOperation(
transform_proto.unique_name,
factory.counter_factory,
factory.state_sampler,
consumers,
iobase.SourceBase.from_runner_api(
parameter.source, factory.context),
factory.get_only_output_coder(transform_proto))
@BeamTransformFactory.register_urn(OLD_DATAFLOW_RUNNER_HARNESS_PARDO_URN, None)
def create(factory, transform_id, transform_proto, serialized_fn, consumers):
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers, serialized_fn)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.PAIR_WITH_RESTRICTION.urn,
beam_runner_api_pb2.ParDoPayload)
def create(*args):
class PairWithRestriction(beam.DoFn):
def __init__(self, fn, restriction_provider):
self.restriction_provider = restriction_provider
# An unused window is requested to force explosion of multi-window
# WindowedValues.
def process(
self, element, _unused_window=beam.DoFn.WindowParam, *args, **kwargs):
# TODO(SDF): Do we want to allow mutation of the element?
# (E.g. it could be nice to shift bulky description to the portion
# that can be distributed.)
yield element, self.restriction_provider.initial_restriction(element)
return _create_sdf_operation(PairWithRestriction, *args)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.SPLIT_AND_SIZE_RESTRICTIONS.urn,
beam_runner_api_pb2.ParDoPayload)
def create(*args):
class SplitAndSizeRestrictions(beam.DoFn):
def __init__(self, fn, restriction_provider):
self.restriction_provider = restriction_provider
def process(self, element_restriction, *args, **kwargs):
element, restriction = element_restriction
for part in self.restriction_provider.split(element, restriction):
yield ((element, part),
self.restriction_provider.restriction_size(element, part))
return _create_sdf_operation(SplitAndSizeRestrictions, *args)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,
beam_runner_api_pb2.ParDoPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
assert parameter.do_fn.spec.urn == python_urns.PICKLED_DOFN_INFO
serialized_fn = parameter.do_fn.spec.payload
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers,
serialized_fn, parameter,
operation_cls=operations.SdfProcessSizedElements)
def _create_sdf_operation(
proxy_dofn,
factory, transform_id, transform_proto, parameter, consumers):
dofn_data = pickler.loads(parameter.do_fn.spec.payload)
dofn = dofn_data[0]
restriction_provider = common.DoFnSignature(dofn).get_restriction_provider()
serialized_fn = pickler.dumps(
(proxy_dofn(dofn, restriction_provider),) + dofn_data[1:])
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers,
serialized_fn, parameter)
@BeamTransformFactory.register_urn(
common_urns.primitives.PAR_DO.urn, beam_runner_api_pb2.ParDoPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
assert parameter.do_fn.spec.urn == python_urns.PICKLED_DOFN_INFO
serialized_fn = parameter.do_fn.spec.payload
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers,
serialized_fn, parameter)
def _create_pardo_operation(
factory, transform_id, transform_proto, consumers,
serialized_fn, pardo_proto=None, operation_cls=operations.DoOperation):
if pardo_proto and pardo_proto.side_inputs:
input_tags_to_coders = factory.get_input_coders(transform_proto)
tagged_side_inputs = [
(tag, beam.pvalue.SideInputData.from_runner_api(si, factory.context))
for tag, si in pardo_proto.side_inputs.items()]
tagged_side_inputs.sort(
key=lambda tag_si: int(re.match('side([0-9]+)(-.*)?$',
tag_si[0]).group(1)))
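    # For example, tags 'side0', 'side1-foo' and 'side10' sort numerically on
    # the captured index as 0, 1, 10 rather than lexicographically.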
side_input_maps = [
StateBackedSideInputMap(
factory.state_handler,
transform_id,
tag,
si,
input_tags_to_coders[tag])
for tag, si in tagged_side_inputs]
else:
side_input_maps = []
output_tags = list(transform_proto.outputs.keys())
  # Hack to match the 'out' prefix injected by the Dataflow runner.
def mutate_tag(tag):
if 'None' in output_tags:
if tag == 'None':
return 'out'
else:
return 'out_' + tag
else:
return tag
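  # For example, with output_tags ['None', 'other'] this yields 'out' and
  # 'out_other'; when 'None' is absent, tags pass through unchanged.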
dofn_data = pickler.loads(serialized_fn)
if not dofn_data[-1]:
# Windowing not set.
if pardo_proto:
other_input_tags = set.union(
set(pardo_proto.side_inputs), set(pardo_proto.timer_specs))
else:
other_input_tags = ()
pcoll_id, = [pcoll for tag, pcoll in transform_proto.inputs.items()
if tag not in other_input_tags]
windowing = factory.context.windowing_strategies.get_by_id(
factory.descriptor.pcollections[pcoll_id].windowing_strategy_id)
serialized_fn = pickler.dumps(dofn_data[:-1] + (windowing,))
if pardo_proto and (pardo_proto.timer_specs or pardo_proto.state_specs
or pardo_proto.splittable):
main_input_coder = None
timer_inputs = {}
for tag, pcoll_id in transform_proto.inputs.items():
if tag in pardo_proto.timer_specs:
timer_inputs[tag] = pcoll_id
elif tag in pardo_proto.side_inputs:
pass
else:
# Must be the main input
assert main_input_coder is None
main_input_tag = tag
main_input_coder = factory.get_windowed_coder(pcoll_id)
assert main_input_coder is not None
if pardo_proto.timer_specs or pardo_proto.state_specs:
user_state_context = FnApiUserStateContext(
factory.state_handler,
transform_id,
main_input_coder.key_coder(),
main_input_coder.window_coder,
timer_specs=pardo_proto.timer_specs)
else:
user_state_context = None
else:
user_state_context = None
timer_inputs = None
output_coders = factory.get_output_coders(transform_proto)
spec = operation_specs.WorkerDoFn(
serialized_fn=serialized_fn,
output_tags=[mutate_tag(tag) for tag in output_tags],
input=None,
side_inputs=None, # Fn API uses proto definitions and the Fn State API
output_coders=[output_coders[tag] for tag in output_tags])
result = factory.augment_oldstyle_op(
operation_cls(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler,
side_input_maps,
user_state_context,
timer_inputs=timer_inputs),
transform_proto.unique_name,
consumers,
output_tags)
if pardo_proto and pardo_proto.splittable:
result.input_info = (
transform_id, main_input_tag, main_input_coder,
transform_proto.outputs.keys())
return result
def _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers, dofn):
serialized_fn = pickler.dumps((dofn, (), {}, [], None))
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers, serialized_fn)
@BeamTransformFactory.register_urn(
common_urns.primitives.ASSIGN_WINDOWS.urn,
beam_runner_api_pb2.WindowingStrategy)
def create(factory, transform_id, transform_proto, parameter, consumers):
class WindowIntoDoFn(beam.DoFn):
def __init__(self, windowing):
self.windowing = windowing
def process(self, element, timestamp=beam.DoFn.TimestampParam,
window=beam.DoFn.WindowParam):
new_windows = self.windowing.windowfn.assign(
WindowFn.AssignContext(timestamp, element=element, window=window))
yield WindowedValue(element, timestamp, new_windows)
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.window import WindowFn, WindowedValue
windowing = Windowing.from_runner_api(parameter, factory.context)
return _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers,
WindowIntoDoFn(windowing))
@BeamTransformFactory.register_urn(IDENTITY_DOFN_URN, None)
def create(factory, transform_id, transform_proto, unused_parameter, consumers):
return factory.augment_oldstyle_op(
operations.FlattenOperation(
transform_proto.unique_name,
operation_specs.WorkerFlatten(
None, [factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PGBKCV.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
# TODO: Combine side inputs.
serialized_combine_fn = pickler.dumps(
(beam.CombineFn.from_runner_api(payload.combine_fn, factory.context),
[], {}))
return factory.augment_oldstyle_op(
operations.PGBKCVOperation(
transform_proto.unique_name,
operation_specs.WorkerPartialGroupByKey(
serialized_combine_fn,
None,
[factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_MERGE_ACCUMULATORS.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'merge')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_EXTRACT_OUTPUTS.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'extract')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_PRECOMBINE.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
serialized_combine_fn = pickler.dumps(
(beam.CombineFn.from_runner_api(payload.combine_fn, factory.context),
[], {}))
return factory.augment_oldstyle_op(
operations.PGBKCVOperation(
transform_proto.unique_name,
operation_specs.WorkerPartialGroupByKey(
serialized_combine_fn,
None,
[factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_MERGE_ACCUMULATORS.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'merge')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_EXTRACT_OUTPUTS.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'extract')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_GROUPED_VALUES.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'all')
def _create_combine_phase_operation(
factory, transform_proto, payload, consumers, phase):
serialized_combine_fn = pickler.dumps(
(beam.CombineFn.from_runner_api(payload.combine_fn, factory.context),
[], {}))
return factory.augment_oldstyle_op(
operations.CombineOperation(
transform_proto.unique_name,
operation_specs.WorkerCombineFn(
serialized_combine_fn,
phase,
None,
[factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(common_urns.primitives.FLATTEN.urn, None)
def create(factory, transform_id, transform_proto, unused_parameter, consumers):
return factory.augment_oldstyle_op(
operations.FlattenOperation(
transform_proto.unique_name,
operation_specs.WorkerFlatten(
None,
[factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.primitives.MAP_WINDOWS.urn,
beam_runner_api_pb2.SdkFunctionSpec)
def create(factory, transform_id, transform_proto, mapping_fn_spec, consumers):
assert mapping_fn_spec.spec.urn == python_urns.PICKLED_WINDOW_MAPPING_FN
window_mapping_fn = pickler.loads(mapping_fn_spec.spec.payload)
class MapWindows(beam.DoFn):
def process(self, element):
key, window = element
return [(key, window_mapping_fn(window))]
return _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers,
MapWindows())
|