ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M)
---|---|---|
py | b40a44e61b4424bdfdb26e398c6bb11c2c41c7c3 | # -*- coding: utf-8 -*-
from typing import List
import attr
import afwf
import requests
from bs4 import BeautifulSoup
from ..cache import cache
@attr.define
class Handler(afwf.Handler):
@cache.memoize(expire=60)
def get_all_python_version(self) -> List[str]:
invalid_versions = ["patches", "."]
url = "https://github.com/pyenv/pyenv/tree/master/plugins/python-build/share/python-build"
res = requests.get(url)
soup = BeautifulSoup(res.text, "html.parser")
div = soup.find("div", class_="Box mb-3")
versions = list()
for a in div.find_all("a", class_="js-navigation-open Link--primary"):
version = a.text
if version not in invalid_versions:
versions.append(version)
return versions
def lower_level_api(self, query: str) -> afwf.ScriptFilter:
if query == "error":
raise ValueError("query cannot be 'error'!")
versions = self.get_all_python_version()
filtered_versions = [
version
for version in versions
if query in version
]
filtered_versions.sort()
filtered_versions = filtered_versions[:50]
sf = afwf.ScriptFilter()
for version in filtered_versions:
url = f"https://github.com/pyenv/pyenv/blob/master/plugins/python-build/share/python-build/{version}"
item = afwf.Item(
title=version,
autocomplete=version,
arg=url,
)
item.open_url(url=url)
sf.items.append(item)
if len(sf.items) == 0:
sf.items.append(afwf.Item(
title=f"{query!r} doesn't match any Python version!",
subtitle="Open https://github.com/pyenv/pyenv/tree/master/plugins/python-build/share/python-build",
arg="https://github.com/pyenv/pyenv/tree/master/plugins/python-build/share/python-build",
))
return sf
def handler(self, query: str) -> afwf.ScriptFilter:
return self.lower_level_api(query=query)
handler = Handler(id="python_version")
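# Illustrative sketch (not part of the original workflow): exercising the
# lower-level API directly. The query string "3.10" is arbitrary, and reading
# `item.title` assumes afwf.Item exposes its constructor arguments as attributes.
def _example_list_matching_versions():
    sf = handler.lower_level_api(query="3.10")
    return [item.title for item in sf.items]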
|
py | b40a44e809c23473149577fc2c2bd7c94f401633 | from weldx.asdf.types import WeldxType
from weldx.measurement import GenericEquipment
__all__ = ["GenericEquipment", "GenericEquipmentType"]
class GenericEquipmentType(WeldxType):
"""Serialization class for generic-equipment."""
name = "equipment/generic_equipment"
version = "1.0.0"
types = [GenericEquipment]
requires = ["weldx"]
handle_dynamic_subclasses = True
@classmethod
def to_tree(cls, node: GenericEquipment, ctx):
"""convert to tagged tree and remove all None entries from node dictionary"""
tree = node.__dict__
return tree
@classmethod
def from_tree(cls, tree, ctx):
if "sources" not in tree:
tree["sources"] = None
obj = GenericEquipment(**tree)
return obj
|
py | b40a45385032dc08bd013bd0ef74ce9e042a82c4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# File Name : ./thepysec/john/__init__.py
#
# Creation Date : Sat 16 Mar 2019 04:09:41 PM EET (16:09)
#
# Last Modified : Sat 16 Mar 2019 04:10:02 PM EET (16:10)
#
# ==============================================================================
|
py | b40a457e010e838110574c30989d0a271ae234c1 | import serial
import time
R_fixo_conectado_a_VCC = 220
R_anterior = 0
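# Reading sketch (added for clarity): each sample is assumed to arrive as two
# bytes, low byte first, so n below reconstructs a 10-bit ADC reading (0-1023).
# The divider relation n/1024 = R/(R + R_fixed) rearranges to
# R = R_fixed * n / (1024 - n), which the loop prints whenever the value
# changes by more than 10%.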
with serial.Serial("/dev/ttyUSB0", 115200) as ser:
while True:
n = ser.read()[0] + (ser.read()[0] << 8)
R = R_fixo_conectado_a_VCC*n/(1024-n)
if (abs(R_anterior-R)>0.1*R):
print("Resistência Conectada ao Ground: {:.2f} omhs".format(R))
R_anterior = R
|
py | b40a45c702b8476f9c0fb14b03b75a2bbddca459 | # ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
# Hook for Jedi, a static analysis tool https://pypi.org/project/jedi/
from PyInstaller.utils.hooks import collect_data_files
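# collect_data_files('jedi') returns (source, destination) tuples for the
# package's non-Python data files so PyInstaller bundles them with the frozen
# application.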
datas = collect_data_files('jedi')
|
py | b40a473bcb141c1c19828f1f34fa3540e75dc906 | import argparse
import os
from pathlib import Path
from typing import (
Iterable,
Tuple,
Union,
)
from eth_utils import (
decode_hex,
to_dict,
)
from eth_keys import keys
from eth_keys.datatypes import PrivateKey
from p2p.constants import DEFAULT_MAX_PEERS
from trinity.constants import (
MAINNET_NETWORK_ID,
ROPSTEN_NETWORK_ID,
SYNC_LIGHT,
)
DEFAULT_DATA_DIRS = {
ROPSTEN_NETWORK_ID: 'ropsten',
MAINNET_NETWORK_ID: 'mainnet',
}
#
# Filesystem path utils
#
def get_local_data_dir(chain_name: str, trinity_root_dir: Path) -> Path:
"""
Returns the base directory path where data for a given chain will be stored.
"""
try:
return Path(os.environ['TRINITY_DATA_DIR'])
except KeyError:
return trinity_root_dir / chain_name
def get_data_dir_for_network_id(network_id: int, trinity_root_dir: Path) -> Path:
"""
Returns the data directory for the chain associated with the given network
id. If the network id is unknown, raises a KeyError.
"""
try:
return get_local_data_dir(DEFAULT_DATA_DIRS[network_id], trinity_root_dir)
except KeyError:
raise KeyError(f"Unknown network id: `{network_id}`")
NODEKEY_FILENAME = 'nodekey'
def get_nodekey_path(data_dir: Path) -> Path:
"""
Returns the path to the private key used for devp2p connections.
"""
return Path(os.environ.get(
'TRINITY_NODEKEY',
str(data_dir / NODEKEY_FILENAME),
))
DATABASE_SOCKET_FILENAME = 'db.ipc'
def get_database_socket_path(data_dir: Path) -> Path:
"""
Returns the path to the ipc socket for the database process.
We're still returning 'str' here on ipc-related paths because of an issue with
multi-processing not being able to interpret 'Path' objects correctly.
"""
return Path(os.environ.get(
'TRINITY_DATABASE_IPC',
data_dir / DATABASE_SOCKET_FILENAME,
))
JSONRPC_SOCKET_FILENAME = 'jsonrpc.ipc'
def get_jsonrpc_socket_path(data_dir: Path) -> Path:
"""
Returns the path to the ipc socket for the JSON-RPC server.
We're still returning 'str' here on ipc-related paths because of an issue with
multi-processing not being able to interpret 'Path' objects correctly.
"""
return Path(os.environ.get(
'TRINITY_JSONRPC_IPC',
data_dir / JSONRPC_SOCKET_FILENAME,
))
#
# Nodekey loading
#
def load_nodekey(nodekey_path: Path) -> PrivateKey:
with nodekey_path.open('rb') as nodekey_file:
nodekey_raw = nodekey_file.read()
nodekey = keys.PrivateKey(nodekey_raw)
return nodekey
@to_dict
def construct_trinity_config_params(
args: argparse.Namespace) -> Iterable[Tuple[str, Union[int, str, Tuple[str, ...]]]]:
"""
Helper function for constructing the kwargs to initialize a TrinityConfig object.
"""
yield 'network_id', args.network_id
yield 'use_discv5', args.discv5
if args.trinity_root_dir is not None:
yield 'trinity_root_dir', args.trinity_root_dir
if args.genesis is not None:
if args.data_dir is None:
raise ValueError("When providing a custom genesis, must also provide a data-dir")
yield 'genesis_config', args.genesis
if args.data_dir is not None:
yield 'data_dir', args.data_dir
if args.nodekey is not None:
if os.path.isfile(args.nodekey):
yield 'nodekey_path', args.nodekey
else:
yield 'nodekey', decode_hex(args.nodekey)
if args.max_peers is not None:
yield 'max_peers', args.max_peers
else:
yield 'max_peers', _default_max_peers(args.sync_mode)
if args.port is not None:
yield 'port', args.port
if args.preferred_nodes is None:
yield 'preferred_nodes', tuple()
else:
yield 'preferred_nodes', tuple(args.preferred_nodes)
def _default_max_peers(sync_mode: str) -> int:
if sync_mode == SYNC_LIGHT:
return DEFAULT_MAX_PEERS // 2
else:
return DEFAULT_MAX_PEERS
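# Illustrative sketch (not part of this module): construct_trinity_config_params
# is a @to_dict generator, so calling it returns a plain dict. The Namespace
# below is hand-built and its attribute values are placeholders.
def _example_trinity_config_params() -> dict:
    ns = argparse.Namespace(
        network_id=MAINNET_NETWORK_ID,
        discv5=False,
        trinity_root_dir=None,
        genesis=None,
        data_dir=None,
        nodekey=None,
        max_peers=None,
        port=None,
        sync_mode=SYNC_LIGHT,
        preferred_nodes=None,
    )
    # With max_peers unset and sync_mode=SYNC_LIGHT, the result includes
    # 'max_peers': DEFAULT_MAX_PEERS // 2 and 'preferred_nodes': ().
    return construct_trinity_config_params(ns)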
|
py | b40a4866d24d84528bfe2231870eba57dabdfdaf | from rest_framework import serializers
class FaceSerializer(serializers.Serializer):
"""
Serializer for the incoming face request.
"""
image = serializers.ImageField()
class ImagePathSerializer(serializers.Serializer):
"""
Serializer for an image referenced by a filesystem path.
"""
path = serializers.CharField()
class ImageBase64Serializer(serializers.Serializer):
"""
Serializer for an image supplied as a filesystem path plus base64-encoded data.
"""
path = serializers.CharField()
base64 = serializers.CharField()
|
py | b40a4a4611dc80194abcb7ad8ae10dfa603a0547 | #!/usr/bin/python
"""Legacy Historian script for analyzing Android bug reports."""
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TO USE: (see also usage() below)
# adb shell dumpsys batterystats --enable full-wake-history (post-KitKat only)
# adb shell dumpsys batterystats --reset
# Optionally start powermonitor logging:
# For example, if using a Monsoon:
# if device/host clocks are not synced, run historian.py -v
# cts/tools/utils/monsoon.py --serialno 2294 --hz 1 --samples 100000 \
# -timestamp | tee monsoon.out
# ...let device run a while...
# stop monsoon.py
# adb bugreport > bugreport.txt
# ./historian.py -p monsoon.out bugreport.txt
import collections
import datetime
import fileinput
import getopt
import re
import StringIO
import subprocess
import sys
import time
POWER_DATA_FILE_TIME_OFFSET = 0 # deal with any clock mismatch.
BLAME_CATEGORY = "wake_lock_in" # category to assign power blame to.
ROWS_TO_SUMMARIZE = ["wake_lock", "running"] # -s: summarize these rows
getopt_debug = 0
getopt_bill_extra_secs = 0
getopt_power_quanta = 15 # slice powermonitor data this many seconds,
# to avoid crashing visualizer
getopt_power_data_file = False
getopt_proc_name = ""
getopt_highlight_category = ""
getopt_show_all_wakelocks = False
getopt_sort_by_power = True
getopt_summarize_pct = -1
getopt_report_filename = ""
getopt_generate_chart_only = False
getopt_disable_chart_drawing = False
def usage():
"""Print usage of the script."""
print "\nUsage: %s [OPTIONS] [FILE]\n" % sys.argv[0]
print " -a: show all wakelocks (don't abbreviate system wakelocks)"
print " -c: disable drawing of chart"
print " -d: debug mode, output debugging info for this program"
print (" -e TIME: extend billing an extra TIME seconds after each\n"
" wakelock, or until the next wakelock is seen. Useful for\n"
" accounting for modem power overhead.")
print " -h: print this message."
print (" -m: generate output that can be embedded in an existing page.\n"
" HTML header and body tags are not outputted.")
print (" -n [CATEGORY=]PROC: output another row containing only processes\n"
" whose name matches uid of PROC in CATEGORY.\n"
" If CATEGORY is not specified, search in wake_lock_in.")
print (" -p FILE: analyze FILE containing power data. Format per\n"
" line: <timestamp in epoch seconds> <amps>")
print (" -q TIME: quantize data on power row in buckets of TIME\n"
" seconds (default %d)" % getopt_power_quanta)
print " -r NAME: report input file name as NAME in HTML."
print (" -s PCT: summarize certain useful rows with additional rows\n"
" showing percent time spent over PCT% in each.")
print " -t: sort power report by wakelock duration instead of charge"
print " -v: synchronize device time before collecting power data"
print "\n"
sys.exit(1)
def parse_time(s, fmt):
"""Parses a human readable duration string into milliseconds.
Takes a human readable duration string like '1d2h3m4s5ms' and returns
the equivalent in milliseconds.
Args:
s: Duration string
fmt: A re object to parse the string
Returns:
A number indicating the duration in milliseconds.
"""
if s == "0": return 0.0
p = re.compile(fmt)
match = p.search(s)
try:
d = match.groupdict()
except (IndexError, AttributeError):
return -1.0
ret = 0.0
if d["day"]: ret += float(d["day"])*60*60*24
if d["hrs"]: ret += float(d["hrs"])*60*60
if d["min"]: ret += float(d["min"])*60
if d["sec"]: ret += float(d["sec"])
if d["ms"]: ret += float(d["ms"])/1000
return ret
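# Illustrative sketch (not called by the script): parsing a forward-format
# timestamp with the same pattern that main() builds; "+1m30s500ms" yields
# 90.5 seconds.
def _example_parse_time():
    fwd_fmt = (r"\+((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
               r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
    return parse_time("+1m30s500ms", fwd_fmt)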
def time_float_to_human(t, show_complete_time):
if show_complete_time:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
else:
return time.strftime("%H:%M:%S", time.localtime(t))
def abbrev_timestr(s):
"""Chop milliseconds off of a time string, if present."""
arr = s.split("s")
if len(arr) < 3: return "0s"
return arr[0]+"s"
def timestr_to_jsdate(timestr):
return "new Date(%s * 1000)" % timestr
def format_time(delta_time):
"""Return a time string representing time past since initial event."""
if not delta_time:
return str(0)
timestr = "+"
datet = datetime.datetime.utcfromtimestamp(delta_time)
if delta_time > 24 * 60 * 60:
timestr += str(datet.day - 1) + datet.strftime("d%Hh%Mm%Ss")
elif delta_time > 60 * 60:
timestr += datet.strftime("%Hh%Mm%Ss").lstrip("0")
elif delta_time > 60:
timestr += datet.strftime("%Mm%Ss").lstrip("0")
elif delta_time > 1:
timestr += datet.strftime("%Ss").lstrip("0")
ms = datet.microsecond / 1000.0
timestr += "%03dms" % ms
return timestr
def format_duration(dur_ms):
"""Return a time string representing the duration in human readable format."""
if not dur_ms:
return "0ms"
ms = dur_ms % 1000
dur_ms = (dur_ms - ms) / 1000
secs = dur_ms % 60
dur_ms = (dur_ms - secs) / 60
mins = dur_ms % 60
hrs = (dur_ms - mins) / 60
out = ""
if hrs > 0:
out += "%dh" % hrs
if mins > 0:
out += "%dm" % mins
if secs > 0:
out += "%ds" % secs
if ms > 0 or not out:
out += "%dms" % ms
return out
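# For example, format_duration(3723004) renders as "1h2m3s4ms".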
def get_event_category(e):
e = e.lstrip("+-")
earr = e.split("=")
return earr[0]
def get_quoted_region(e):
e = e.split("\"")[1]
return e
def get_after_equal(e):
e = e.split("=")[1]
return e
def get_wifi_suppl_state(e):
try:
e = get_after_equal(e)
return e.split("(")[0]
except IndexError:
return ""
def get_event_subcat(cat, e):
"""Get subcategory of an category from an event string.
Subcategory can be use to distinguish simultaneous entities
within one category. To track possible concurrent instances,
add category name to concurrent_cat. Default is to track
events using only category name.
Args:
cat: Category name
e: Event name
Returns:
A string that is the subcategory of the event. Returns
the substring after category name if not empty and cat
is one of the categories tracked by concurrent_cat.
Default subcategory is the empty string.
"""
concurrent_cat = {"wake_lock_in", "sync", "top", "job", "conn"}
if cat in concurrent_cat:
try:
return get_after_equal(e)
except IndexError:
pass
return ""
def get_proc_pair(e):
if ":" in e:
proc_pair = get_after_equal(e)
return proc_pair.split(":", 1)
else:
return ("", "")
def as_to_mah(a):
return a * 1000 / 60 / 60
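# Converts amp-seconds to milliamp-hours: e.g. as_to_mah(3.6) == 1.0 mAh.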
def apply_fn_over_range(fn, start_time, end_time, arglist):
"""Apply a given function per second quanta over a time range.
Args:
fn: The function to apply
start_time: The starting time of the whole duration
end_time: The ending time of the whole duration
arglist: Additional argument list
Returns:
A list of results generated by applying the function
over the time range.
"""
results = []
cursor = start_time
while cursor < end_time:
cursor_int = int(cursor)
next_cursor = float(cursor_int + 1)
if next_cursor > end_time: next_cursor = end_time
time_this_quanta = next_cursor - cursor
results.append(fn(cursor_int, time_this_quanta, *arglist))
cursor = next_cursor
return results
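# Illustrative sketch (not called by the script): accumulating per-second
# coverage over the range 10.25-12.5 s yields {10: 0.75, 11: 1.0, 12: 0.5}.
def _example_apply_fn_over_range():
    acc = {}
    def count_fn(second, fraction, store):
        store[second] = store.get(second, 0.0) + fraction
    apply_fn_over_range(count_fn, 10.25, 12.5, [acc])
    return acc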
def space_escape(match):
value = match.group()
p = re.compile(r"\s+")
return p.sub("_", value)
def parse_reset_time(line):
line = line.strip()
line = line.split("RESET:TIME: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d-%H-%M-%S")
return time.mktime(st)
def is_file_legacy_mode(input_file):
"""Autodetect legacy (K and earlier) format."""
detection_on = False
for line in fileinput.input(input_file):
if not detection_on and line.startswith("Battery History"):
detection_on = True
if not detection_on:
continue
split_line = line.split()
if not split_line:
continue
line_time = split_line[0]
if "+" not in line_time and "-" not in line_time:
continue
fileinput.close()
return line_time[0] == "-"
return False
def is_emit_event(e):
return e[0] != "+"
def is_standalone_event(e):
return not (e[0] == "+" or e[0] == "-")
def is_proc_event(e):
return e.startswith("+proc")
def autovivify():
"""Returns a multidimensional dict."""
return collections.defaultdict(autovivify)
def swap(swap_list, first, second):
swap_list[first], swap_list[second] = swap_list[second], swap_list[first]
def add_emit_event(emit_dict, cat, name, start, end):
"""Saves a new event into the dictionary that will be visualized."""
newevent = (name, int(start), int(end))
if end < start:
print "BUG: end time before start time: %s %s %s<br>" % (name,
start,
end)
else:
if getopt_debug:
print "Stored emitted event: %s<br>" % str(newevent)
if cat in emit_dict:
emit_dict[cat].append(newevent)
else:
emit_dict[cat] = [newevent]
def sync_time():
subprocess.call(["adb", "root"])
subprocess.call(["sleep", "3"])
start_time = int(time.time())
while int(time.time()) == start_time:
pass
curr_time = time.strftime("%Y%m%d.%H%M%S", time.localtime())
subprocess.call(["adb", "shell", "date", "-s", curr_time])
sys.exit(0)
def parse_search_option(cmd):
global getopt_proc_name, getopt_highlight_category
if "=" in cmd:
getopt_highlight_category = cmd.split("=")[0]
getopt_proc_name = cmd.split("=")[1]
else:
getopt_highlight_category = "wake_lock_in"
getopt_proc_name = cmd
def parse_argv():
"""Parse argument and set up globals."""
global getopt_debug, getopt_bill_extra_secs, getopt_power_quanta
global getopt_sort_by_power, getopt_power_data_file
global getopt_summarize_pct, getopt_show_all_wakelocks
global getopt_report_filename
global getopt_generate_chart_only
global getopt_disable_chart_drawing
try:
opts, argv_rest = getopt.getopt(sys.argv[1:],
"acde:hmn:p:q:r:s:tv", ["help"])
except getopt.GetoptError as err:
print "<pre>\n"
print str(err)
usage()
try:
for o, a in opts:
if o == "-a": getopt_show_all_wakelocks = True
if o == "-c": getopt_disable_chart_drawing = True
if o == "-d": getopt_debug = True
if o == "-e": getopt_bill_extra_secs = int(a)
if o in ("-h", "--help"): usage()
if o == "-m": getopt_generate_chart_only = True
if o == "-n": parse_search_option(a)
if o == "-p": getopt_power_data_file = a
if o == "-q": getopt_power_quanta = int(a)
if o == "-r": getopt_report_filename = str(a)
if o == "-s": getopt_summarize_pct = int(a)
if o == "-t": getopt_sort_by_power = False
if o == "-v": sync_time()
except ValueError as err:
print str(err)
usage()
if not argv_rest:
usage()
return argv_rest
class Printer(object):
"""Organize and render the visualizer."""
_default_color = "#4070cf"
# -n option is represented by "highlight". All the other names specified
# in _print_setting are the same as category names.
_print_setting = [
("battery_level", "#4070cf"),
("plugged", "#2e8b57"),
("screen", "#cbb69d"),
("top", "#dc3912"),
("sync", "#9900aa"),
("wake_lock_pct", "#6fae11"),
("wake_lock", "#cbb69d"),
("highlight", "#4070cf"),
("running_pct", "#6fae11"),
("running", "#990099"),
("wake_reason", "#b82e2e"),
("wake_lock_in", "#ff33cc"),
("job", "#cbb69d"),
("mobile_radio", "#aa0000"),
("data_conn", "#4070cf"),
("conn", "#ff6a19"),
("activepower", "#dd4477"),
("device_idle", "#37ff64"),
("motion", "#4070cf"),
("active", "#119fc8"),
("power_save", "#ff2222"),
("wifi", "#119fc8"),
("wifi_full_lock", "#888888"),
("wifi_scan", "#888888"),
("wifi_multicast", "#888888"),
("wifi_radio", "#888888"),
("wifi_running", "#109618"),
("wifi_suppl", "#119fc8"),
("wifi_signal_strength", "#9900aa"),
("phone_signal_strength", "#dc3912"),
("phone_scanning", "#dda0dd"),
("audio", "#990099"),
("phone_in_call", "#cbb69d"),
("bluetooth", "#cbb69d"),
("phone_state", "#dc3912"),
("signal_strength", "#119fc8"),
("video", "#cbb69d"),
("flashlight", "#cbb69d"),
("low_power", "#109618"),
("fg", "#dda0dd"),
("gps", "#ff9900"),
("reboot", "#ddff77"),
("power", "#ff2222"),
("status", "#9ac658"),
("health", "#888888"),
("plug", "#888888"),
("charging", "#888888"),
("pkginst", "#cbb69d"),
("pkgunin", "#cbb69d")]
_ignore_categories = ["user", "userfg"]
def __init__(self):
self._print_setting_cats = set()
for cat in self._print_setting:
self._print_setting_cats.add(cat[0])
def combine_wifi_states(self, event_list, start_time):
"""Discard intermediate states and combine events chronologically."""
tracking_states = ["disconn", "completed", "disabled", "scanning"]
selected_event_list = []
for event in event_list:
state = get_wifi_suppl_state(event[0])
if state in tracking_states:
selected_event_list.append(event)
if len(selected_event_list) <= 1:
return set(selected_event_list)
event_name = "wifi_suppl="
for e in selected_event_list:
state = get_wifi_suppl_state(e[0])
event_name += (state + "->")
event_name = event_name[:-2]
sample_event = selected_event_list[0][0]
timestr_start = sample_event.find("(")
event_name += sample_event[timestr_start:]
return set([(event_name, start_time, start_time)])
def aggregate_events(self, emit_dict):
"""Combine events with the same name occurring during the same second.
Aggregate events to keep visualization from being so noisy.
Args:
emit_dict: A dict containing events.
Returns:
A dict with repeated events happening within one sec removed.
"""
output_dict = {}
for cat, events in emit_dict.iteritems():
output_dict[cat] = []
start_dict = {}
for event in events:
start_time = event[1]
if start_time in start_dict:
start_dict[start_time].append(event)
else:
start_dict[start_time] = [event]
for start_time, event_list in start_dict.iteritems():
if cat == "wifi_suppl":
event_set = self.combine_wifi_states(event_list, start_time)
else:
event_set = set(event_list) # uniqify
for event in event_set:
output_dict[cat].append(event)
return output_dict
def print_emit_dict(self, cat, emit_dict):
for e in emit_dict[cat]:
if cat == "wake_lock":
cat_name = "wake_lock *"
else:
cat_name = cat
print "['%s', '%s', %s, %s]," % (cat_name, e[0],
timestr_to_jsdate(e[1]),
timestr_to_jsdate(e[2]))
def print_highlight_dict(self, highlight_dict):
catname = getopt_proc_name + " " + getopt_highlight_category
if getopt_highlight_category in highlight_dict:
for e in highlight_dict[getopt_highlight_category]:
print "['%s', '%s', %s, %s]," % (catname, e[0],
timestr_to_jsdate(e[1]),
timestr_to_jsdate(e[2]))
def print_events(self, emit_dict, highlight_dict):
"""print category data in the order of _print_setting.
Args:
emit_dict: Major event dict.
highlight_dict: Additional event information for -n option.
"""
emit_dict = self.aggregate_events(emit_dict)
highlight_dict = self.aggregate_events(highlight_dict)
cat_count = 0
for i in range(0, len(self._print_setting)):
cat = self._print_setting[i][0]
if cat in emit_dict:
self.print_emit_dict(cat, emit_dict)
cat_count += 1
if cat == "highlight":
self.print_highlight_dict(highlight_dict)
# handle category that is not included in _print_setting
if cat_count < len(emit_dict):
for cat in emit_dict:
if (cat not in self._print_setting_cats and
cat not in self._ignore_categories):
sys.stderr.write("event category not found: %s\n" % cat)
self.print_emit_dict(cat, emit_dict)
def print_chart_options(self, emit_dict, highlight_dict, width, height):
"""Print Options provided to the visualizater."""
color_string = ""
cat_count = 0
# construct color string following the order of _print_setting
for i in range(0, len(self._print_setting)):
cat = self._print_setting[i][0]
if cat in emit_dict:
color_string += "'%s', " % self._print_setting[i][1]
cat_count += 1
if cat == "highlight" and highlight_dict:
color_string += "'%s', " % self._print_setting[i][1]
cat_count += 1
if cat_count % 4 == 0:
color_string += "\n\t"
# handle category that is not included in _print_setting
if cat_count < len(emit_dict):
for cat in emit_dict:
if cat not in self._print_setting_cats:
color_string += "'%s', " % self._default_color
print("\toptions = {\n"
"\ttimeline: { colorByRowLabel: true},\n"
"\t'width': %s,\n"
"\t'height': %s, \n"
"\tcolors: [%s]\n"
"\t};" % (width, height, color_string))
class LegacyFormatConverter(object):
"""Convert Kit-Kat bugreport format to latest format support."""
_TIME_FORMAT = (r"\-((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
def __init__(self):
self._end_time = 0
self._total_duration = 0
def parse_end_time(self, line):
line = line.strip()
try:
line = line.split("dumpstate: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d %H:%M:%S")
self._end_time = time.mktime(st)
except IndexError:
pass
def get_timestr(self, line_time):
"""Convert backward time string in Kit-Kat to forward time string."""
delta = self._total_duration - parse_time(line_time, self._TIME_FORMAT)
datet = datetime.datetime.utcfromtimestamp(delta)
if delta == 0:
return "0"
timestr = "+"
if delta > 24 * 60 * 60:
timestr += str(datet.day - 1) + datet.strftime("d%Hh%Mm%Ss")
elif delta > 60 * 60:
timestr += datet.strftime("%Hh%Mm%Ss").lstrip("0")
elif delta > 60:
timestr += datet.strftime("%Mm%Ss").lstrip("0")
elif delta > 1:
timestr += datet.strftime("%Ss").lstrip("0")
ms = datet.microsecond / 1000.0
timestr += "%03dms" % ms
return timestr
def get_header(self, line_time):
self._total_duration = parse_time(line_time, self._TIME_FORMAT)
start_time = self._end_time - self._total_duration
header = "Battery History\n"
header += "RESET:TIME: %s\n" % time.strftime("%Y-%m-%d-%H-%M-%S",
time.localtime(start_time))
return header
def convert(self, input_file):
"""Convert legacy format file into string that fits latest format."""
output_string = ""
history_start = False
for line in fileinput.input(input_file):
if "dumpstate:" in line:
self.parse_end_time(line)
if self._end_time:
break
fileinput.close()
if not self._end_time:
print "cannot find end time"
sys.exit(1)
for line in fileinput.input(input_file):
if not history_start and line.startswith("Battery History"):
history_start = True
continue
elif not history_start:
continue
if line.isspace(): break
line = line.strip()
arr = line.split()
if len(arr) < 4: continue
p = re.compile('"[^"]+"')
line = p.sub(space_escape, line)
split_line = line.split()
(line_time, line_battery_level, line_state) = split_line[:3]
line_events = split_line[3:]
if not self._total_duration:
output_string += self.get_header(line_time)
timestr = self.get_timestr(line_time)
event_string = " ".join(line_events)
newline = "%s _ %s %s %s\n" % (timestr, line_battery_level,
line_state, event_string)
output_string += newline
fileinput.close()
return output_string
class BHEmitter(object):
"""Process battery history section from bugreport.txt."""
_omit_cats = ["temp", "volt", "brightness", "sensor", "proc"]
# categories that have "+" and "-" events. If we see an event in these
# categories starting at time 0 without +/- sign, treat it as a "+" event.
_transitional_cats = ["plugged", "running", "wake_lock", "gps", "sensor",
"phone_in_call", "mobile_radio", "phone_scanning",
"proc", "fg", "top", "sync", "wifi", "wifi_full_lock",
"wifi_scan", "wifi_multicast", "wifi_running", "conn",
"bluetooth", "audio", "video", "wake_lock_in", "job",
"device_idle", "wifi_radio"]
_in_progress_dict = autovivify() # events that are currently in progress
_proc_dict = {} # mapping of "proc" uid to human-readable name
_search_proc_id = -1 # proc id of the getopt_proc_name
match_list = [] # list of package names that match search string
cat_list = [] # BLAME_CATEGORY summary data
def store_event(self, cat, subcat, event_str, event_time, timestr):
self._in_progress_dict[cat][subcat] = (event_str, event_time, timestr)
if getopt_debug:
print "store_event: %s in %s/%s<br>" % (event_str, cat, subcat)
def retrieve_event(self, cat, subcat):
"""Pop event from in-progress event dict if match exists."""
if cat in self._in_progress_dict:
try:
result = self._in_progress_dict[cat].pop(subcat)
if getopt_debug:
print "retrieve_event: found %s/%s<br>" % (cat, subcat)
return (True, result)
except KeyError:
pass
if getopt_debug:
print "retrieve_event: no match for event %s/%s<br>" % (cat, subcat)
return (False, (None, None, None))
def store_proc(self, e, highlight_dict):
proc_pair = get_after_equal(e)
(proc_id, proc_name) = proc_pair.split(":", 1)
self._proc_dict[proc_id] = proc_name # may overwrite
if getopt_proc_name and getopt_proc_name in proc_name and proc_id:
if proc_pair not in self.match_list:
self.match_list.append(proc_pair)
if self._search_proc_id == -1:
self._search_proc_id = proc_id
elif self._search_proc_id != proc_id:
if (proc_name[1:-1] == getopt_proc_name or
proc_name == getopt_proc_name):
# reinitialize
highlight_dict.clear()
# replace default match with complete match
self._search_proc_id = proc_id
swap(self.match_list, 0, -1)
def procs_to_str(self):
l = sorted(self._proc_dict.items(), key=lambda x: x[0])
result = ""
for i in l:
result += "%s: %s\n" % (i[0], i[1])
return result
def get_proc_name(self, proc_id):
if proc_id in self._proc_dict:
return self._proc_dict[proc_id]
else:
return ""
def annotate_event_name(self, name):
"""Modifies the event name to make it more understandable."""
if "*alarm*" in name:
try:
proc_pair = get_after_equal(name)
except IndexError:
return name
proc_id = proc_pair.split(":", 1)[0]
name = name + ":" + self.get_proc_name(proc_id)
if getopt_debug:
print "annotate_event_name: %s" % name
return name
def abbreviate_event_name(self, name):
"""Abbreviate location-related event name."""
if not getopt_show_all_wakelocks:
if "wake_lock" in name:
if "LocationManagerService" in name or "NlpWakeLock" in name:
return "LOCATION"
if "UlrDispatching" in name:
return "LOCATION"
if "GCoreFlp" in name or "GeofencerStateMachine" in name:
return "LOCATION"
if "NlpCollectorWakeLock" in name or "WAKEUP_LOCATOR" in name:
return "LOCATION"
if "GCM" in name or "C2DM" in name:
return "GCM"
return name
def process_wakelock_event_name(self, start_name, start_id, end_name, end_id):
start_name = self.process_event_name(start_name)
end_name = self.process_event_name(end_name)
event_name = "first=%s:%s, last=%s:%s" % (start_id, start_name,
end_id, end_name)
return event_name
def process_event_timestr(self, start_timestr, end_timestr):
return "(%s-%s)" % (abbrev_timestr(start_timestr),
abbrev_timestr(end_timestr))
def process_event_name(self, event_name):
event_name = self.annotate_event_name(event_name)
event_name = self.abbreviate_event_name(event_name)
return event_name.replace("'", r"\'")
def track_event_parallelism_fn(self, start_time, time_this_quanta, time_dict):
if start_time in time_dict:
time_dict[start_time] += time_this_quanta
else:
time_dict[start_time] = time_this_quanta
if getopt_debug:
print "time_dict[%d] now %f added %f" % (start_time,
time_dict[start_time],
time_this_quanta)
# track total amount of event time held per second quanta
def track_event_parallelism(self, start_time, end_time, time_dict):
apply_fn_over_range(self.track_event_parallelism_fn,
start_time, end_time, [time_dict])
def emit_event(self, cat, event_name, start_time, start_timestr,
end_event_name, end_time, end_timestr,
emit_dict, time_dict, highlight_dict):
"""Saves an event to be later visualized."""
(start_pid, start_pname) = get_proc_pair(event_name)
(end_pid, end_pname) = get_proc_pair(end_event_name)
if cat == "wake_lock" and end_pname and end_pname != start_pname:
short_event_name = self.process_wakelock_event_name(
start_pname, start_pid, end_pname, end_pid)
else:
short_event_name = self.process_event_name(event_name)
event_name = short_event_name + self.process_event_timestr(start_timestr,
end_timestr)
if getopt_highlight_category == cat:
if start_pid == self._search_proc_id or end_pid == self._search_proc_id:
add_emit_event(highlight_dict, cat,
event_name, start_time, end_time)
if cat == BLAME_CATEGORY:
self.cat_list.append((short_event_name, start_time, end_time))
end_time += getopt_bill_extra_secs
self.track_event_parallelism(start_time, end_time, time_dict)
if end_time - start_time < 1:
# HACK: visualizer library doesn't always render sub-second events
end_time += 1
add_emit_event(emit_dict, cat, event_name, start_time, end_time)
def handle_event(self, event_time, time_str, event_str,
emit_dict, time_dict, highlight_dict):
"""Handle an individual event.
Args:
event_time: Event time
time_str: Event time as string
event_str: Event string
emit_dict: A dict tracking events to draw in the timeline, by row
time_dict: A dict tracking BLAME_CATEGORY duration, by seconds
highlight_dict: A separate event dict for -n option
"""
if getopt_debug:
print "<p>handle_event: %s at %s<br>" % (event_str, time_str)
cat = get_event_category(event_str)
subcat = get_event_subcat(cat, event_str)
# events already in progress are treated as starting at time 0
if (time_str == "0" and is_standalone_event(event_str)
and cat in self._transitional_cats):
event_str = "+" + event_str
if is_proc_event(event_str): self.store_proc(event_str, highlight_dict)
if cat in self._omit_cats: return
if not is_emit_event(event_str):
# "+" event, save it until we find a matching "-"
self.store_event(cat, subcat, event_str, event_time, time_str)
return
else:
# "-" or standalone event such as "wake_reason"
start_time = 0.0
(found, event) = self.retrieve_event(cat, subcat)
if found:
(event_name, start_time, start_timestr) = event
else:
event_name = event_str
start_time = event_time
start_timestr = time_str
# Events that were still going on at the time of reboot
# should be marked as ending at the time of reboot.
if event_str == "reboot":
self.emit_remaining_events(event_time, time_str, emit_dict,
time_dict, highlight_dict)
self.emit_event(cat, event_name, start_time, start_timestr,
event_str, event_time, time_str,
emit_dict, time_dict, highlight_dict)
def generate_summary_row(self, row_to_summarize, emit_dict, start_time,
end_time):
"""Generate additional data row showing % time covered by another row."""
summarize_quanta = 60
row_name = row_to_summarize + "_pct"
if row_to_summarize not in emit_dict: return
summarize_list = emit_dict[row_to_summarize]
seconds_dict = {}
# Generate dict of seconds where the row to summarize is seen.
for i in summarize_list:
self.track_event_parallelism(i[1], i[2], seconds_dict)
# Traverse entire range of time we care about and generate % events.
for summary_start_time in range(int(start_time), int(end_time),
summarize_quanta):
summary_end_time = summary_start_time + summarize_quanta
found_ctr = 0
for second_cursor in range(summary_start_time, summary_end_time):
if second_cursor in seconds_dict:
found_ctr += 1
if found_ctr:
pct = int(found_ctr * 100 / summarize_quanta)
if pct > getopt_summarize_pct:
add_emit_event(emit_dict, row_name, "%s=%d" % (row_name, pct),
summary_start_time, summary_end_time)
def generate_summary_rows(self, emit_dict, start_time, end_time):
if getopt_summarize_pct < 0:
return
for i in ROWS_TO_SUMMARIZE:
self.generate_summary_row(i, emit_dict, start_time, end_time)
def emit_remaining_events(self, end_time, end_timestr, emit_dict, time_dict,
highlight_dict):
for cat in self._in_progress_dict:
for subcat in self._in_progress_dict[cat]:
(event_name, s_time, s_timestr) = self._in_progress_dict[cat][subcat]
self.emit_event(cat, event_name, s_time, s_timestr,
event_name, end_time, end_timestr,
emit_dict, time_dict, highlight_dict)
class BlameSynopsis(object):
"""Summary data of BLAME_CATEGORY instance used for power accounting."""
def __init__(self):
self.name = ""
self.mah = 0
self.timestr = ""
self._duration_list = []
def add(self, name, duration, mah, t):
self.name = name
self._duration_list.append(duration)
self.mah += mah
if not self.timestr:
self.timestr = time_float_to_human(t, False)
def get_count(self):
return len(self._duration_list)
def get_median_duration(self):
return sorted(self._duration_list)[int(self.get_count() / 2)]
def get_total_duration(self):
return sum(self._duration_list)
def to_str(self, total_mah, show_power):
"""Returns a summary string."""
if total_mah:
pct = self.mah * 100 / total_mah
else:
pct = 0
avg = self.get_total_duration() / self.get_count()
ret = ""
if show_power:
ret += "%.3f mAh (%.1f%%), " % (self.mah, pct)
ret += "%3s events, " % str(self.get_count())
ret += "%6.3fs total " % self.get_total_duration()
ret += "%6.3fs avg " % avg
ret += "%6.3fs median: " % self.get_median_duration()
ret += self.name
ret += " (first at %s)" % self.timestr
return ret
class PowerEmitter(object):
"""Give power accounting and bill to wake lock."""
_total_amps = 0
_total_top_amps = 0
_line_ctr = 0
_TOP_THRESH = .01
_quanta_amps = 0
_start_secs = 0
_power_dict = {}
_synopsis_dict = {}
def __init__(self, cat_list):
self._cat_list = cat_list
def get_range_power_fn(self, start_time, time_this_quanta, time_dict):
"""Assign proportional share of blame.
During any second, this event might have been held for
less than the second, and others might have been held during
that time. Here we try to assign the proportional share of the
blame.
Args:
start_time: Starting time of this quanta
time_this_quanta: Duration of this quanta
time_dict: A dict tracking total time at different starting time
Returns:
A proportional share of blame for the quanta.
"""
if start_time in self._power_dict:
total_time_held = time_dict[start_time]
multiplier = time_this_quanta / total_time_held
result = self._power_dict[start_time] * multiplier
if getopt_debug:
print("get_range_power: distance %f total time %f "
"base power %f, multiplier %f<br>" %
(time_this_quanta, total_time_held,
self._power_dict[start_time], multiplier))
assert multiplier <= 1.0
else:
if getopt_debug:
print "get_range_power: no power data available"
result = 0.0
return result
def get_range_power(self, start, end, time_dict):
power_results = apply_fn_over_range(self.get_range_power_fn,
start, end, [time_dict])
result = 0.0
for i in power_results:
result += i
return result
def bill(self, time_dict):
for _, e in enumerate(self._cat_list):
(event_name, start_time, end_time) = e
if event_name in self._synopsis_dict:
sd = self._synopsis_dict[event_name]
else:
sd = BlameSynopsis()
amps = self.get_range_power(start_time,
end_time + getopt_bill_extra_secs,
time_dict)
mah = as_to_mah(amps)
sd.add(event_name, end_time - start_time, mah, start_time)
if getopt_debug:
print "billed range %f %f at %fAs to %s<br>" % (start_time, end_time,
amps, event_name)
self._synopsis_dict[event_name] = sd
def handle_line(self, secs, amps, emit_dict):
"""Handle a power data file line."""
self._line_ctr += 1
if not self._start_secs:
self._start_secs = secs
self._quanta_amps += amps
self._total_amps += amps
self._power_dict[secs] = amps
if secs % getopt_power_quanta:
return
avg = self._quanta_amps / getopt_power_quanta
event_name = "%.3f As (%.3f A avg)" % (self._quanta_amps, avg)
add_emit_event(emit_dict, "power", event_name, self._start_secs, secs)
if self._quanta_amps > self._TOP_THRESH * getopt_power_quanta:
self._total_top_amps += self._quanta_amps
add_emit_event(emit_dict, "activepower", event_name,
self._start_secs, secs)
self._quanta_amps = 0
self._start_secs = secs
def report(self):
"""Report bill of BLAME_CATEGORY."""
mah = as_to_mah(self._total_amps)
report_power = self._line_ctr
if report_power:
avg_ma = self._total_amps/self._line_ctr
print "<p>Total power: %.3f mAh, avg %.3f" % (mah, avg_ma)
top_mah = as_to_mah(self._total_top_amps)
print ("<br>Total power above awake "
"threshold (%.1fmA): %.3f mAh %.3f As" % (self._TOP_THRESH * 1000,
top_mah,
self._total_top_amps))
print "<br>%d samples, %d min<p>" % (self._line_ctr, self._line_ctr / 60)
if report_power and getopt_bill_extra_secs:
print("<b>Power seen during each history event, including %d "
"seconds after each event:" % getopt_bill_extra_secs)
elif report_power:
print "<b>Power seen during each history event:"
else:
print "<b>Event summary:"
print "</b><br><pre>"
report_list = []
total_mah = 0.0
total_count = 0
for _, v in self._synopsis_dict.iteritems():
total_mah += v.mah
total_count += v.get_count()
if getopt_sort_by_power and report_power:
sort_term = v.mah
else:
sort_term = v.get_total_duration()
report_list.append((sort_term, v.to_str(mah, report_power)))
report_list.sort(key=lambda tup: tup[0], reverse=True)
for i in report_list:
print i[1]
print "total: %.3f mAh, %d events" % (total_mah, total_count)
print "</pre>\n"
def adjust_reboot_time(line, event_time):
# Line delta time is not reset after reboot, but wall time will
# be printed after reboot finishes. This function returns how far
# off we are and the actual reboot event time.
line = line.strip()
line = line.split("TIME: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d-%H-%M-%S")
wall_time = time.mktime(st)
return wall_time - event_time, wall_time
def get_app_id(uid):
"""Returns the app ID from a string.
Reverses and uses the methods defined in UserHandle.java to get
only the app ID.
Args:
uid: a string representing the uid printed in the history output
Returns:
An integer representing the specific app ID.
"""
abr_uid_re = re.compile(r"u(?P<userId>\d+)(?P<aidType>[ias])(?P<appId>\d+)")
if not uid:
return 0
if uid.isdigit():
# 100000 is the range of uids allocated for a user.
return int(uid) % 100000
if abr_uid_re.match(uid):
match = abr_uid_re.search(uid)
try:
d = match.groupdict()
if d["aidType"] == "i": # first isolated uid
return int(d["appId"]) + 99000
if d["aidType"] == "a": # first application uid
return int(d["appId"]) + 10000
return int(d["appId"]) # app id wasn't modified
except IndexError:
sys.stderr.write("Abbreviated app UID didn't match properly")
return uid
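# For example, get_app_id("u0a123") returns 10123 (application uids start at
# 10000), and the full numeric uid "1010123" also reduces to app ID 10123.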
usr_time = "usrTime"
sys_time = "sysTime"
# A map of app uid to their total CPU usage in terms of user
# and system time (in ms).
app_cpu_usage = {}
def save_app_cpu_usage(uid, usr_cpu_time, sys_cpu_time):
uid = get_app_id(uid)
if uid in app_cpu_usage:
app_cpu_usage[uid][usr_time] += usr_cpu_time
app_cpu_usage[uid][sys_time] += sys_cpu_time
else:
app_cpu_usage[uid] = {usr_time: usr_cpu_time, sys_time: sys_cpu_time}
# Constants defined in android.net.ConnectivityManager
conn_constants = {
"0": "TYPE_MOBILE",
"1": "TYPE_WIFI",
"2": "TYPE_MOBILE_MMS",
"3": "TYPE_MOBILE_SUPL",
"4": "TYPE_MOBILE_DUN",
"5": "TYPE_MOBILE_HIPRI",
"6": "TYPE_WIMAX",
"7": "TYPE_BLUETOOTH",
"8": "TYPE_DUMMY",
"9": "TYPE_ETHERNET",
"17": "TYPE_VPN",
}
def main():
details_re = re.compile(r"^Details:\scpu=\d+u\+\d+s\s*(\((?P<appCpu>.*)\))?")
app_cpu_usage_re = re.compile(
r"(?P<uid>\S+)=(?P<userTime>\d+)u\+(?P<sysTime>\d+)s")
proc_stat_re = re.compile((r"^/proc/stat=(?P<usrTime>-?\d+)\s+usr,\s+"
r"(?P<sysTime>-?\d+)\s+sys,\s+"
r"(?P<ioTime>-?\d+)\s+io,\s+"
r"(?P<irqTime>-?\d+)\s+irq,\s+"
r"(?P<sirqTime>-?\d+)\s+sirq,\s+"
r"(?P<idleTime>-?\d+)\s+idle.*")
)
data_start_time = 0.0
data_stop_time = 0
data_stop_timestr = ""
on_mode = False
time_offset = 0.0
overflowed = False
reboot = False
prev_battery_level = -1
bhemitter = BHEmitter()
emit_dict = {} # maps event categories to events
time_dict = {} # total event time held per second
highlight_dict = {} # search result for -n option
is_first_data_line = True
is_dumpsys_format = False
argv_remainder = parse_argv()
input_file = argv_remainder[0]
legacy_mode = is_file_legacy_mode(input_file)
# A map of /proc/stat names to total times (in ms).
proc_stat_summary = {
"usr": 0,
"sys": 0,
"io": 0,
"irq": 0,
"sirq": 0,
"idle": 0,
}
if legacy_mode:
input_string = LegacyFormatConverter().convert(input_file)
input_file = StringIO.StringIO(input_string)
else:
input_file = open(input_file, "r")
while True:
line = input_file.readline()
if not line: break
if not on_mode and line.startswith("Battery History"):
on_mode = True
continue
elif not on_mode:
continue
if line.isspace(): break
line = line.strip()
if "RESET:TIME: " in line:
data_start_time = parse_reset_time(line)
continue
if "OVERFLOW" in line:
overflowed = True
break
if "START" in line:
reboot = True
continue
if "TIME: " in line:
continue
# escape spaces within quoted regions
p = re.compile('"[^"]+"')
line = p.sub(space_escape, line)
if details_re.match(line):
match = details_re.search(line)
try:
d = match.groupdict()
if d["appCpu"]:
for app in d["appCpu"].split(", "):
app_match = app_cpu_usage_re.search(app)
try:
a = app_match.groupdict()
save_app_cpu_usage(a["uid"],
int(a["userTime"]), int(a["sysTime"]))
except IndexError:
sys.stderr.write("App CPU usage line didn't match properly")
except IndexError:
sys.stderr.write("Details line didn't match properly")
continue
elif proc_stat_re.match(line):
match = proc_stat_re.search(line)
try:
d = match.groupdict()
if d["usrTime"]:
proc_stat_summary["usr"] += int(d["usrTime"])
if d["sysTime"]:
proc_stat_summary["sys"] += int(d["sysTime"])
if d["ioTime"]:
proc_stat_summary["io"] += int(d["ioTime"])
if d["irqTime"]:
proc_stat_summary["irq"] += int(d["irqTime"])
if d["sirqTime"]:
proc_stat_summary["sirq"] += int(d["sirqTime"])
if d["idleTime"]:
proc_stat_summary["idle"] += int(d["idleTime"])
except IndexError:
sys.stderr.write("proc/stat line didn't match properly")
continue
# pull apart input line by spaces
split_line = line.split()
if len(split_line) < 4: continue
(line_time, _, line_battery_level, fourth_field) = split_line[:4]
# "bugreport" output has an extra hex field vs "dumpsys", detect here.
if is_first_data_line:
is_first_data_line = False
try:
int(fourth_field, 16)
except ValueError:
is_dumpsys_format = True
if is_dumpsys_format:
line_events = split_line[3:]
else:
line_events = split_line[4:]
fmt = (r"\+((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
time_delta_s = parse_time(line_time, fmt) + time_offset
if time_delta_s < 0:
print "Warning: time went backwards: %s" % line
continue
event_time = data_start_time + time_delta_s
if reboot and "TIME:" in line:
# adjust offset using wall time
offset, event_time = adjust_reboot_time(line, event_time)
if offset < 0:
print "Warning: time went backwards: %s" % line
continue
time_offset += offset
time_delta_s = event_time - data_start_time
reboot = False
line_events = {"reboot"}
if line_battery_level != prev_battery_level:
# battery_level is not an actual event, it's on every line
if line_battery_level.isdigit():
bhemitter.handle_event(event_time, format_time(time_delta_s),
"battery_level=" + line_battery_level,
emit_dict, time_dict, highlight_dict)
for event in line_events:
# conn events need to be parsed in order to be useful
if event.startswith("conn"):
num, ev = get_after_equal(event).split(":")
if ev == "\"CONNECTED\"":
event = "+conn="
else:
event = "-conn="
if num in conn_constants:
event += conn_constants[num]
else:
event += "UNKNOWN"
bhemitter.handle_event(event_time, format_time(time_delta_s), event,
emit_dict, time_dict, highlight_dict)
prev_battery_level = line_battery_level
data_stop_time = event_time
data_stop_timestr = format_time(time_delta_s)
input_file.close()
if not on_mode:
print "Battery history not present in bugreport."
return
bhemitter.emit_remaining_events(data_stop_time, data_stop_timestr,
emit_dict, time_dict, highlight_dict)
bhemitter.generate_summary_rows(emit_dict, data_start_time,
data_stop_time)
power_emitter = PowerEmitter(bhemitter.cat_list)
if getopt_power_data_file:
for line in fileinput.input(getopt_power_data_file):
data = line.split(" ")
secs = float(data[0]) + POWER_DATA_FILE_TIME_OFFSET
amps = float(data[1])
power_emitter.handle_line(secs, amps, emit_dict)
power_emitter.bill(time_dict)
printer = Printer()
if not getopt_generate_chart_only:
print "<!DOCTYPE html>\n<html><head>\n"
report_filename = argv_remainder[0]
if getopt_report_filename:
report_filename = getopt_report_filename
header = "Battery Historian analysis for %s" % report_filename
print "<title>" + header + "</title>"
if overflowed:
print ('<font size="5" color="red">Warning: History overflowed at %s, '
'many events may be missing.</font>' %
time_float_to_human(data_stop_time, True))
print "<p>" + header + "</p>"
if legacy_mode:
print("<p><b>WARNING:</b> legacy format detected; "
"history information is limited</p>\n")
if not getopt_generate_chart_only:
print """
<script src="https://ajax.loli.net/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script type="text/javascript" src="https://www.google.cn/jsapi?autoload={'modules':[{'name':'visualization','version':'1','packages':['timeline']}]}"></script>
"""
print "<script type=\"text/javascript\">"
if not getopt_disable_chart_drawing:
print "google.setOnLoadCallback(drawChart);\n"
print """
var dataTable;
var chart;
var options;
var default_width = 3000
function drawChart() {
container = document.getElementById('chart');
chart = new google.visualization.Timeline(container);
dataTable = new google.visualization.DataTable();
dataTable.addColumn({ type: 'string', id: 'Position' });
dataTable.addColumn({ type: 'string', id: 'Name' });
dataTable.addColumn({ type: 'date', id: 'Start' });
dataTable.addColumn({ type: 'date', id: 'End' });
dataTable.addRows([
"""
printer.print_events(emit_dict, highlight_dict)
print "]);"
width = 3000 # default width
height = 3000 # initial height
printer.print_chart_options(emit_dict, highlight_dict, width, height)
print """
//make sure we allocate enough vertical space
options['height'] = dataTable.getNumberOfRows() * 40;
chart.draw(dataTable, options);
//get vertical coordinate of scale bar
var svg = document.getElementById('chart').getElementsByTagName('svg')[0];
var label = svg.children[2].children[0];
var y = label.getAttribute('y');
//plus height of scale bar
var chart_div_height = parseInt(y) + 50;
var chart_height = chart_div_height;
//set chart height to exact height
options['height'] = chart_height;
$('#chart').css('height', chart_div_height);
svg.setAttribute('height', chart_height);
var content = $('#chart').children()[0];
$(content).css('height', chart_height);
var inner = $(content).children()[0];
$(inner).css('height', chart_height);
}
function redrawChart() {
var scale = document.getElementById("scale").value;
scale = scale.replace('%', '') / 100
options['width'] = scale * default_width;
chart.draw(dataTable, options);
}
</script>
<style>
#redrawButton{
width:100px;
}
</style>
"""
if not getopt_generate_chart_only:
print "</head>\n<body>\n"
show_complete_time = False
if data_stop_time - data_start_time > 24 * 60 * 60:
show_complete_time = True
start_localtime = time_float_to_human(data_start_time, show_complete_time)
stop_localtime = time_float_to_human(data_stop_time, show_complete_time)
print "<div id=\"chart\">"
if not getopt_generate_chart_only:
print ("<b>WARNING: Visualizer disabled. "
"If you see this message, download the HTML then open it.</b>")
print "</div>"
print("<p><b>WARNING:</b>\n"
"<br>*: wake_lock field only shows the first/last wakelock held \n"
"when the system is awake. For more detail, use wake_lock_in."
"<br>To enable full wakelock reporting (post-KitKat only) : \n"
"<br>adb shell dumpsys batterystats "
"--enable full-wake-history</p>")
if getopt_proc_name:
if len(bhemitter.match_list) > 1:
print("<p><b>WARNING:</b>\n"
"<br>Multiple match found on -n option <b>%s</b>"
"<ul>" % getopt_proc_name)
for match in bhemitter.match_list:
print "<li>%s</li>" % match
print ("</ul>Showing search result for %s</p>"
% bhemitter.match_list[0].split(":", 1)[0])
elif not bhemitter.match_list:
print("<p><b>WARNING:</b>\n"
"<br>No match on -n option <b>%s</b></p>" % getopt_proc_name)
if not highlight_dict:
print ("Search - <b>%s</b> in <b>%s</b> - did not match any event"
% (getopt_proc_name, getopt_highlight_category))
print ("<pre>(Local time %s - %s, %dm elapsed)</pre>"
% (start_localtime, stop_localtime,
(data_stop_time-data_start_time) / 60))
print ("<p>\n"
"Zoom: <input id=\"scale\" type=\"text\" value=\"100%\"></input>"
"<button type=\"button\" id=\"redrawButton\""
"onclick=\"redrawChart()\">redraw</button></p>\n"
"</p>\n")
power_emitter.report()
if app_cpu_usage:
print "<b>App CPU usage:</b><br />"
print "In user time:<br />"
print "<table border=\"1\"><tr><td>UID</td><td>Duration</td></tr>"
for (uid, use) in sorted(app_cpu_usage.items(),
key=lambda x: -x[1][usr_time]):
print "<tr><td>%s</td>" % uid
print "<td>%s</td></tr>" % format_duration(use[usr_time])
print "</table>"
print "<br />In system time:<br />"
print "<table border=\"1\"><tr><td>UID</td><td>Duration</td></tr>"
for (uid, use) in sorted(app_cpu_usage.items(),
key=lambda x: -x[1][sys_time]):
print "<tr><td>%s</td>" % uid
print "<td>%s</td></tr>" % format_duration(use[sys_time])
print "</table>"
print "<br /><b>Proc/stat summary</b><ul>"
print "<li>Total User Time: %s</li>" % format_duration(
proc_stat_summary["usr"])
print "<li>Total System Time: %s</li>" % format_duration(
proc_stat_summary["sys"])
print "<li>Total IO Time: %s</li>" % format_duration(
proc_stat_summary["io"])
print "<li>Total Irq Time: %s</li>" % format_duration(
proc_stat_summary["irq"])
print "<li>Total Soft Irq Time: %s</li>" % format_duration(
proc_stat_summary["sirq"])
print "<li>Total Idle Time: %s</li>" % format_duration(
proc_stat_summary["idle"])
print "</ul>"
print "<pre>Process table:"
print bhemitter.procs_to_str()
print "</pre>\n"
if not getopt_generate_chart_only:
print "</body>\n</html>"
if __name__ == "__main__":
main()
|
py | b40a4a70b876f77f8c77f40d86acd8d15e8edaec | from abc import abstractmethod
import json
import socket
import struct
from typing import Any, Callable
from pathlib import Path
N_ACTIONS = 10
ACTION_SPACE = [i for i in range(N_ACTIONS)]
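# Protocol note (summarizing the socket code below): every message exchanged
# with the server is framed as a 2-byte network-order length prefix ("!H")
# followed by that many bytes of UTF-8 JSON; _recv_env_info and _send_action
# implement the receive and send directions respectively.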
class BaseEnv:
def __init__(self, model_path: str, server_address: str):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_path = Path(server_address)
if server_path.exists() and server_path.is_socket():
try:
self.sock.connect(server_address)
except OSError as err:
print("Unable to connect Python IPC socket.")
raise RuntimeError("Unable to connect Python IPC socket") from err
else:
print("Invalid Python IPC Path")
raise ValueError
self.past_action = None
self.model = self.setup_env(model_path)
@abstractmethod
def setup_env(self, model_path: str) -> Callable:
"""
Sets up the model and environment variables with the path provided.
The callable takes as input the output of process_env_info, and returns
an int [0-9], signifying the next action to take.
Args:
model_path: The path to the model to load as a string.
Returns:
A Callable that predicts the next bitrate to send given the input
"""
raise NotImplementedError("Setup must be done by implementing class")
@abstractmethod
def process_env_info(self, env_info: dict) -> Any:
"""
Processes the current environment information to feed to the model.
Handles, for example, frame stacking, invalid data, normalization.
Args:
env_info: the dictionary passed in by Puffer server
Returns:
input to be fed into the model for prediction
"""
raise NotImplementedError("Processing must be done by implementing class")
def _recv_env_info(self) -> dict:
json_len_struct = self.sock.recv(2, socket.MSG_WAITALL)
json_len, *_ = struct.unpack("!H", json_len_struct)
json_data = self.sock.recv(json_len, socket.MSG_WAITALL)
env_info = json.loads(json_data)
return env_info
def _send_action(self, action: int) -> None:
action_json = json.dumps(dict(action=action))
action_json = action_json.encode("utf-8")
json_len_struct = struct.pack("!H", len(action_json))
self.sock.sendall(json_len_struct + action_json)
def env_loop(self) -> None:
while True:
try:
env_info = self._recv_env_info()
except Exception as e:
print("{}: {} {}".format(
"Encountered error", e.args, "while receiving env info."))
raise RuntimeError
model_input = self.process_env_info(env_info=env_info)
action = self.model(model_input)
if action not in ACTION_SPACE:
print("Action not contained in the action space.")
raise ValueError
self.past_action = action
self._send_action(action)
|
py | b40a4acc0168fd66f4c438ad8e23c665be9e004f | import torch
import torch.nn.functional as F
import torch.nn as nn
from .modules import *
import config as cf
class ConvBlock(nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2,
padding=1, activation=True, instance_norm=True):
super(ConvBlock, self).__init__()
self.conv = ModuleParallel(nn.Conv2d(input_size, output_size, kernel_size, stride, padding))
self.activation = activation
self.lrelu = ModuleParallel(nn.LeakyReLU(0.2, True))
self.instance_norm = instance_norm
self.insnorm_conv = InstanceNorm2dParallel(output_size)
self.use_exchange = cf.use_exchange
if self.use_exchange:
self.exchange = Exchange()
self.insnorm_threshold = cf.insnorm_threshold
self.insnorm_list = []
for module in self.insnorm_conv.modules():
if isinstance(module, nn.InstanceNorm2d):
self.insnorm_list.append(module)
def forward(self, x):
if self.activation:
out = self.conv(self.lrelu(x))
else:
out = self.conv(x)
if self.instance_norm:
out = self.insnorm_conv(out)
if self.use_exchange and len(x) > 1:
out = self.exchange(out, self.insnorm_list, self.insnorm_threshold)
return out
class ConvBlockShare(nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2,
padding=1, activation=True, instance_norm=True):
super(ConvBlockShare, self).__init__()
self.conv = ModuleParallel(nn.Conv2d(input_size, output_size, kernel_size, stride, padding))
self.activation = activation
self.lrelu = ModuleParallel(nn.LeakyReLU(0.2, True))
self.instance_norm = instance_norm
self.insnorm = ModuleParallel(nn.InstanceNorm2d(output_size, affine=True, track_running_stats=True))
def forward(self, x):
if self.activation:
out = self.conv(self.lrelu(x))
else:
out = self.conv(x)
if self.instance_norm:
out = self.insnorm(out)
return out
class DeconvBlock(nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2,
padding=1, instance_norm=True, dropout=False):
super(DeconvBlock, self).__init__()
self.deconv = ModuleParallel(nn.ConvTranspose2d(
input_size, output_size, kernel_size, stride, padding))
self.insnorm_deconv = InstanceNorm2dParallel(output_size)
self.drop = ModuleParallel(nn.Dropout(0.5))
self.relu = ModuleParallel(nn.ReLU(True))
self.instance_norm = instance_norm
self.dropout = dropout
def forward(self, x):
if self.instance_norm:
out = self.insnorm_deconv(self.deconv(self.relu(x)))
else:
out = self.deconv(self.relu(x))
if self.dropout:
out = self.drop(out)
return out
class Generator(nn.Module):
def __init__(self, input_dim, num_filter, output_dim):
super(Generator, self).__init__()
# Encoder
self.conv1 = ConvBlock(input_dim, num_filter, activation=False, instance_norm=False)
self.conv2 = ConvBlock(num_filter, num_filter * 2)
self.conv3 = ConvBlock(num_filter * 2, num_filter * 4)
self.conv4 = ConvBlock(num_filter * 4, num_filter * 8)
self.conv5 = ConvBlock(num_filter * 8, num_filter * 8)
self.conv6 = ConvBlock(num_filter * 8, num_filter * 8)
self.conv7 = ConvBlock(num_filter * 8, num_filter * 8)
self.conv8 = ConvBlock(num_filter * 8, num_filter * 8, instance_norm=False)
# Decoder
self.deconv1 = DeconvBlock(num_filter * 8, num_filter * 8, dropout=True)
self.deconv2 = DeconvBlock(num_filter * 8 * 2, num_filter * 8, dropout=True)
self.deconv3 = DeconvBlock(num_filter * 8 * 2, num_filter * 8, dropout=True)
self.deconv4 = DeconvBlock(num_filter * 8 * 2, num_filter * 8)
self.deconv5 = DeconvBlock(num_filter * 8 * 2, num_filter * 4)
self.deconv6 = DeconvBlock(num_filter * 4 * 2, num_filter * 2)
self.deconv7 = DeconvBlock(num_filter * 2 * 2, num_filter)
self.deconv8 = DeconvBlock(num_filter * 2, output_dim, instance_norm=False)
self.tanh = ModuleParallel(nn.Tanh())
self.alpha = nn.Parameter(torch.ones(cf.num_parallel, requires_grad=True))
self.register_parameter('alpha', self.alpha)
def forward(self, x):
# Encoder
enc1 = self.conv1(x)
enc2 = self.conv2(enc1)
enc3 = self.conv3(enc2)
enc4 = self.conv4(enc3)
enc5 = self.conv5(enc4)
enc6 = self.conv6(enc5)
enc7 = self.conv7(enc6)
enc8 = self.conv8(enc7)
# Decoder with skip-connections
dec1 = self.deconv1(enc8)
dec1 = [torch.cat([dec_, enc_], 1) for (dec_, enc_) in zip(dec1, enc7)]
dec2 = self.deconv2(dec1)
dec2 = [torch.cat([dec_, enc_], 1) for (dec_, enc_) in zip(dec2, enc6)]
dec3 = self.deconv3(dec2)
dec3 = [torch.cat([dec_, enc_], 1) for (dec_, enc_) in zip(dec3, enc5)]
dec4 = self.deconv4(dec3)
dec4 = [torch.cat([dec_, enc_], 1) for (dec_, enc_) in zip(dec4, enc4)]
dec5 = self.deconv5(dec4)
dec5 = [torch.cat([dec_, enc_], 1) for (dec_, enc_) in zip(dec5, enc3)]
dec6 = self.deconv6(dec5)
dec6 = [torch.cat([dec_, enc_], 1) for (dec_, enc_) in zip(dec6, enc2)]
dec7 = self.deconv7(dec6)
dec7 = [torch.cat([dec_, enc_], 1) for (dec_, enc_) in zip(dec7, enc1)]
dec8 = self.deconv8(dec7)
out = self.tanh(dec8)
ens = 0
alpha_soft = F.softmax(self.alpha, dim=0)
for l in range(cf.num_parallel):
ens += alpha_soft[l] * out[l].detach()
out.append(ens)
return out, alpha_soft
def normal_weight_init(self, mean=0.0, std=0.02):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, mean, std)
if isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, mean, std)
class Discriminator(nn.Module):
def __init__(self, input_dim, num_filter, output_dim):
super(Discriminator, self).__init__()
self.conv1 = ConvBlockShare(input_dim, num_filter, activation=False, instance_norm=False)
self.conv2 = ConvBlockShare(num_filter, num_filter * 2)
self.conv3 = ConvBlockShare(num_filter * 2, num_filter * 4)
self.conv4 = ConvBlockShare(num_filter * 4, num_filter * 8, stride=1)
self.conv5 = ConvBlockShare(num_filter * 8, output_dim, stride=1, instance_norm=False)
self.sigmoid = ModuleParallel(nn.Sigmoid())
def forward(self, x, label):
if isinstance(label, list):
x = [torch.cat([x_, label_], 1) for (x_, label_) in zip(x, label)]
else:
x = [torch.cat([x_, label], 1) for x_ in x]
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
out = self.sigmoid(x)
return out
def normal_weight_init(self, mean=0.0, std=0.02):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, mean, std)
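# Illustrative forward-pass sketch. The batch/channel/spatial sizes below are assumptions
# for illustration; the real values come from config.py and the training script, which
# feed the parallel generator a list with one tensor per modality.
def _example_forward_pass():
    x = [torch.randn(1, 3, 256, 256) for _ in range(cf.num_parallel)]
    gen = Generator(input_dim=3, num_filter=64, output_dim=3)
    gen.normal_weight_init()
    outs, alpha_soft = gen(x)  # per-branch outputs, with the soft ensemble appended last
    disc = Discriminator(input_dim=3 + 3, num_filter=64, output_dim=1)
    scores = disc(outs[:-1], x[0])  # concat(generated, conditioning) -> 6 input channels
    return outs, alpha_soft, scores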
|
py | b40a4b00f672319c0af22d0534ebd08efab6cda6 | # Generated by Django 3.1.3 on 2020-11-15 00:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
('description', models.CharField(max_length=50)),
('pub_date', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='gallery.category')),
('location', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='gallery.location')),
],
),
]
|
py | b40a4b0b6c98f1d953f56a305878d958f151b977 | from click import echo, secho
from sqlalchemy.exc import ProgrammingError, IntegrityError
from sqlparse import split, format
from sqlalchemy.sql import ClauseElement
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
from sqlalchemy_utils import create_database, database_exists, drop_database
from macrostrat.utils import cmd, get_logger
from time import sleep
log = get_logger(__name__)
def db_session(engine):
factory = sessionmaker(bind=engine)
return factory()
def run_query(db, filename_or_query, **kwargs):
"""
Run a query on a SQL database (represented by
a SQLAlchemy database object) and turn it into a
`Pandas` dataframe.
"""
from pandas import read_sql
if "SELECT" in str(filename_or_query):
# We are working with a query string instead of
# an SQL file.
sql = filename_or_query
else:
with open(filename_or_query) as f:
sql = f.read()
return read_sql(sql, db, **kwargs)
def pretty_print(sql, **kwargs):
for line in sql.split("\n"):
for i in ["SELECT", "INSERT", "UPDATE", "CREATE", "DROP", "DELETE", "ALTER"]:
if not line.startswith(i):
continue
start = line.split("(")[0].strip().rstrip(";").replace(" AS", "")
secho(start, **kwargs)
return
def run_sql(session, sql, params=None, stop_on_error=False):
queries = split(sql)
for q in queries:
sql = format(q, strip_comments=True).strip()
if sql == "":
continue
try:
session.execute(text(sql), params=params)
if hasattr(session, "commit"):
session.commit()
pretty_print(sql, dim=True)
except (ProgrammingError, IntegrityError) as err:
err = str(err.orig).strip()
dim = "already exists" in err
if hasattr(session, "rollback"):
session.rollback()
pretty_print(sql, fg=None if dim else "red", dim=True)
if dim:
err = " " + err
secho(err, fg="red", dim=dim)
if stop_on_error:
return
def _exec_raw_sql(engine, sql):
"""Execute SQL unsafely on an sqlalchemy Engine"""
try:
engine.execute(text(sql))
pretty_print(sql, dim=True)
except (ProgrammingError, IntegrityError) as err:
err = str(err.orig).strip()
dim = "already exists" in err
pretty_print(sql, fg=None if dim else "red", dim=True)
if dim:
err = " " + err
secho(err, fg="red", dim=dim)
def run_sql_file(session, sql_file, params=None):
sql = open(sql_file).read()
run_sql(session, sql, params=params)
def run_sql_query_file(session, sql_file, params=None):
sql = open(sql_file).read()
return session.execute(sql, params)
def get_or_create(session, model, defaults=None, **kwargs):
"""
Get an instance of a model, or create it if it doesn't
exist.
https://stackoverflow.com/questions/2546207
"""
instance = session.query(model).filter_by(**kwargs).first()
if instance:
instance._created = False
return instance
else:
params = dict(
(k, v) for k, v in kwargs.items() if not isinstance(v, ClauseElement)
)
params.update(defaults or {})
instance = model(**params)
session.add(instance)
instance._created = True
return instance
def get_db_model(db, model_name: str):
return getattr(db.model, model_name)
@contextmanager
def temp_database(conn_string, drop=True, ensure_empty=False):
"""Create a temporary database and tear it down after tests."""
if ensure_empty:
drop_database(conn_string)
if not database_exists(conn_string):
create_database(conn_string)
try:
yield create_engine(conn_string)
finally:
if drop:
drop_database(conn_string)
def connection_args(engine):
"""Get PostgreSQL connection arguments for a engine"""
_psql_flags = {"-U": "username", "-h": "host", "-p": "port", "-P": "password"}
if isinstance(engine, str):
# We passed a connection url!
engine = create_engine(engine)
flags = ""
for flag, _attr in _psql_flags.items():
val = getattr(engine.url, _attr)
if val is not None:
flags += f" {flag} {val}"
return flags, engine.url.database
def db_isready(engine_or_url):
args, _ = connection_args(engine_or_url)
c = cmd("pg_isready", args, capture_output=True)
return c.returncode == 0
def wait_for_database(engine_or_url, quiet=False):
msg = "Waiting for database..."
while not db_isready(engine_or_url):
if not quiet:
echo(msg, err=True)
log.info(msg)
sleep(1)
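# Illustrative end-to-end sketch of the helpers above. The connection string is a
# placeholder, pg_isready must be on PATH for wait_for_database(), and nothing here
# is wired into the package itself.
def _example_scratch_database(conn_string="postgresql://localhost:5432/sparrow_scratch"):
    with temp_database(conn_string, drop=True) as engine:
        wait_for_database(engine, quiet=True)
        session = db_session(engine)
        run_sql(session, "CREATE TABLE IF NOT EXISTS example (id integer); INSERT INTO example VALUES (1);")
        return run_query(engine, "SELECT * FROM example")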
|
py | b40a4b262b5480be5de6790ac386b88a3e412e7b | import sys
import regex
import cgatcore.iotools as iotools
import pysam
import logging
import argparse
import Levenshtein
import pandas as pd
import scipy.sparse as sparse
import scipy.io as io
import os
# ########################################################################### #
# ###################### Set up the logging ################################# #
# ########################################################################### #
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
L = logging.getLogger("save_mtx.py")
# ########################################################################### #
# ######################## Parse the arguments ############################## #
# ########################################################################### #
parser = argparse.ArgumentParser()
parser.add_argument("--data", default=None, type=str,
help='counts data from umi_tools')
parser.add_argument("--dir", default=None, type=str,
help='dir for output mtx')
args = parser.parse_args()
L.info("args:")
print(args)
# ########################################################################### #
# ######################## Code ############################## #
# ########################################################################### #
def save_mtx(data, destination, cell_names=None, gene_names=None):
"""Save a mtx file - taken from https://www.programcreek.com/python/?code=KrishnaswamyLab%2Fscprep%2Fscprep-master%2Fscprep%2Fio%2Fmtx.py
Parameters
----------
data : array-like, shape=[n_samples, n_features]
Input data, saved to destination/matrix.mtx
destination : str
Directory in which to save the data
cell_names : list-like, shape=[n_samples], optional (default: None)
Cell names associated with rows, saved to destination/cell_names.tsv.
If `data` is a pandas DataFrame and `cell_names` is None,
these are autopopulated from `data.index`.
gene_names : list-like, shape=[n_features], optional (default: None)
        Gene names associated with columns, saved to destination/gene_names.tsv.
If `data` is a pandas DataFrame and `gene_names` is None,
these are autopopulated from `data.columns`.
Examples
--------
>>> import scprep
>>> scprep.io.save_mtx(data, destination="my_data")
>>> reload = scprep.io.load_mtx("my_data/matrix.mtx",
... cell_names="my_data/cell_names.tsv",
... gene_names="my_data/gene_names.tsv")
"""
if isinstance(data, pd.DataFrame):
if cell_names is None:
cell_names = data.index
if gene_names is None:
gene_names = data.columns
data = sparse.coo_matrix(data)
# handle ~/ and relative paths
#print(cell_names)
destination = os.path.expanduser(destination)
if not os.path.isdir(destination):
os.mkdir(destination)
if cell_names is not None:
with open(os.path.join(destination, "genes.barcodes.txt"), "w") as handle:
for name in cell_names:
handle.write("{}\n".format(name))
if gene_names is not None:
with open(os.path.join(destination, "genes.genes.txt"), "w") as handle:
for name in gene_names:
handle.write("{}\n".format(name))
io.mmwrite(os.path.join(destination, "genes.mtx"), data)
infile = pd.read_table(args.data, sep="\t", header=0)
infile = infile[infile['count'] > 0]
infile = infile.pivot(index='cell', columns='gene', values='count')
infile.fillna(0, inplace=True)
save_mtx(infile, args.dir)
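# Typical invocation (file names are placeholders); the input is the long-format
# umi_tools count table with cell/gene/count columns that is pivoted above:
#   python save_mtx.py --data counts.tsv.gz --dir mtx_output/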
|
py | b40a4b4ce679f710595244c4d7c883c271bb4c04 | # Generated by Django 3.2.11 on 2022-01-13 13:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0027_option_choices'),
]
operations = [
migrations.AlterField(
model_name='option',
name='choices',
field=models.CharField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', max_length=1024, verbose_name='choices'),
),
]
|
py | b40a4c181b91fa05d996feb22c0052c5d964dc77 | from kivy.properties import NumericProperty, BooleanProperty
from kivy.uix.button import Button
from kivy.animation import Animation
from coloredLayout import ColoredLayout
class Runner(ColoredLayout):
value = NumericProperty(0)
finished = BooleanProperty(False)
def __init__(self,
total=10, steptime=1, autorepeat=True,
bcolor=(0.23, 1, 0, 1),
btext_inprogress='А ну сел',
**kwargs):
super().__init__(**kwargs)
self.total = total
self.autorepeat = autorepeat
self.btext_inprogress = btext_inprogress
self.animation = (Animation(pos_hint={'top': 0.1}, duration=steptime/2)
+ Animation(pos_hint={'top': 1.0}, duration=steptime/2))
self.animation.on_progress = self.next
self.btn = Button(size_hint=(1, 0.1), pos_hint={'top': 1.0}, background_color=bcolor)
self.add_widget(self.btn)
'''def restart(self, total):
self.total = total
self.start()'''
def start(self):
self.value = 0
self.finished = False
self.btn.text = self.btext_inprogress
if self.autorepeat:
self.animation.repeat = True
self.animation.start(self.btn)
def next(self, widget, step):
if step == 1.0:
self.value += 1
if self.value >= self.total:
self.animation.repeat = False
self.finished = True |
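# Illustrative host app for the Runner widget; ColoredLayout is assumed to accept
# standard Kivy layout kwargs, and the bound callback simply reports completion.
def _example_app():
    from kivy.app import App
    class _RunnerDemo(App):
        def build(self):
            runner = Runner(total=5, steptime=1.0)
            runner.bind(finished=lambda *_: print("set finished"))
            runner.start()
            return runner
    return _RunnerDemo()
# To try it out:  _example_app().run()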
py | b40a4c966ca71fcfbd52a9d02dc2c42d7e2218e6 | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
from cdpcli import CDP_ACCESS_KEY_ID_KEY_NAME, \
CDP_ACCESS_TOKEN_KEY_NAME, \
CDP_PRIVATE_KEY_KEY_NAME, \
CDP_REGION_KEY_NAME
from cdpcli.compat import compat_input
from cdpcli.endpoint import EndpointResolver
from cdpcli.exceptions import ProfileNotFound
from cdpcli.extensions.commands import BasicCommand
from cdpcli.extensions.configure import CREDENTIAL_FILE_COMMENT
from cdpcli.extensions.configure.get import ConfigureGetCommand
from cdpcli.extensions.configure.list import ConfigureListCommand
from cdpcli.extensions.configure.set import ConfigureSetCommand
from cdpcli.extensions.writer import ConfigFileWriter
from . import mask_value
class InteractivePrompter(object):
def get_value(self, current_value, config_name, prompt_text=''):
if config_name in (CDP_ACCESS_KEY_ID_KEY_NAME, CDP_PRIVATE_KEY_KEY_NAME):
current_value = mask_value(current_value)
interactive_long_input = False
if config_name == CDP_PRIVATE_KEY_KEY_NAME:
# See THUN-222 for context on why this is necessary
interactive_long_input = True
response = compat_input(
"%s [%s]: " % (prompt_text, current_value),
interactive_long_input)
if not response:
# If the user hits enter, we return a value of None
# instead of an empty string. That way we can determine
# whether or not a value has changed.
response = None
return response
class ConfigureCommand(BasicCommand):
NAME = 'configure'
DESCRIPTION = BasicCommand.FROM_FILE()
SYNOPSIS = ('cdp configure [--profile profile-name]')
EXAMPLES = (
'To create a new default configuration for CDP Public Cloud::\n'
'\n'
' $ cdp configure\n'
' CDP Access Key ID [None]: accesskey\n'
' CDP Private Key [None]: privatekey\n'
' CDP Endpoint URL (blank for public cloud) [None]:\n'
)
SUBCOMMANDS = [
{'name': 'list', 'command_class': ConfigureListCommand},
{'name': 'get', 'command_class': ConfigureGetCommand},
{'name': 'set', 'command_class': ConfigureSetCommand},
]
# If you want to add new values to prompt, update this list here.
VALUES_TO_PROMPT = [
# (config_name, prompt_text)
(CDP_ACCESS_KEY_ID_KEY_NAME, "CDP Access Key ID"),
(CDP_PRIVATE_KEY_KEY_NAME, "CDP Private Key"),
(CDP_REGION_KEY_NAME, "CDP Region"),
(EndpointResolver.CDP_ENDPOINT_URL_KEY_NAME,
"CDP Endpoint URL (blank for public cloud)")
]
def __init__(self, prompter=None, config_writer=None):
super(ConfigureCommand, self).__init__()
if prompter is None:
prompter = InteractivePrompter()
self._prompter = prompter
if config_writer is None:
config_writer = ConfigFileWriter()
self._config_writer = config_writer
def _run_main(self, client_creator, parsed_args, parsed_globals):
# Called when invoked with no args "cdp configure"
new_values = {}
# This is the config from the config file scoped to a specific
# profile.
try:
context = client_creator.context
config = context.get_scoped_config()
except ProfileNotFound:
config = {}
for config_name, prompt_text in self.VALUES_TO_PROMPT:
current_value = config.get(config_name)
new_value = self._prompter.get_value(current_value, config_name,
prompt_text)
if new_value is not None and new_value != current_value:
new_values[config_name] = new_value
config_filename = os.path.expanduser(
context.get_config_variable('config_file'))
if new_values:
self._write_out_creds_file_values(context,
new_values,
parsed_globals.profile)
if parsed_globals.profile is not None:
new_values['__section__'] = (
'profile %s' % parsed_globals.profile)
self._config_writer.update_config(new_values, config_filename)
def _write_out_creds_file_values(self, context, new_values, profile_name):
# The access_key/private_key/access_token are now *always* written to the shared
# credentials file (~/.cdp/credentials).
credential_file_values = {}
if CDP_ACCESS_KEY_ID_KEY_NAME in new_values:
credential_file_values[CDP_ACCESS_KEY_ID_KEY_NAME] = new_values.pop(
CDP_ACCESS_KEY_ID_KEY_NAME)
if CDP_PRIVATE_KEY_KEY_NAME in new_values:
credential_file_values[CDP_PRIVATE_KEY_KEY_NAME] = new_values.pop(
CDP_PRIVATE_KEY_KEY_NAME)
if CDP_ACCESS_TOKEN_KEY_NAME in new_values:
credential_file_values[CDP_ACCESS_TOKEN_KEY_NAME] = new_values.pop(
CDP_ACCESS_TOKEN_KEY_NAME)
if credential_file_values:
if profile_name is not None:
credential_file_values['__section__'] = profile_name
shared_credentials_filename = os.path.expanduser(
context.get_config_variable('credentials_file'))
self._config_writer.update_config(
credential_file_values,
shared_credentials_filename,
config_file_comment=CREDENTIAL_FILE_COMMENT)
|
py | b40a4d5a59982394b80373615a198556ed26b299 | '''
A program demonstrating the use and capabilities of a particular image segmentation algorithm described
in Jasper R. R. Uijlings, Koen E. A. van de Sande, Theo Gevers, Arnold W. M. Smeulders:
"Selective Search for Object Recognition"
International Journal of Computer Vision, Volume 104 (2), page 154-171, 2013
Usage:
  ./selectivesearchsegmentation_demo.py
  (this copy hard-codes the input image and the fast search strategy; the
  original "input_image (single|fast|quality)" argument handling is commented
  out in the main block below)
Use "a" to display fewer rects, 'd' to display more rects, "q" to quit.
'''
import cv2 as cv
import sys
if __name__ == '__main__':
print(__doc__)
img = cv.imread('../Data/chicky_512.png')
cv.setUseOptimized(True)
cv.setNumThreads(8)
gs = cv.ximgproc.segmentation.createSelectiveSearchSegmentation()
gs.setBaseImage(img)
gs.switchToSelectiveSearchFast()
#gs.switchToSelectiveSearchQuality()
#if (sys.argv[2][0] == 's'):
# gs.switchToSingleStrategy()
#elif (sys.argv[2][0] == 'f'):
# gs.switchToSelectiveSearchFast()
#elif (sys.argv[2][0] == 'q'):
# gs.switchToSelectiveSearchQuality()
#else:
# print(__doc__)
# sys.exit(1)
rects = gs.process()
nb_rects = 50
while True:
wimg = img.copy()
for i in range(len(rects)):
if (i < nb_rects):
x, y, w, h = rects[i]
cv.rectangle(wimg, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
        cv.imshow("Output", wimg)
c = cv.waitKey()
if (c == 100):
nb_rects += 10
elif (c == 97 and nb_rects > 10):
nb_rects -= 10
elif (c == 113):
break
cv.destroyAllWindows()
|
py | b40a4e289b023531e1878813b1c2f172a56791dc | import json
import logging
from collections import OrderedDict
from codecs import open as copen
from decimal import Decimal
from os.path import exists
from os import stat, remove, makedirs, environ
from time import strftime, time, localtime
import requests
from flask import jsonify, Flask, redirect, request, send_file
from flask import make_response
from flask.json import JSONEncoder
from flask_cors import CORS
from rq import Queue
import qmk_redis
import qmk_storage
from kle2xy import KLE2xy
from qmk_commands import keymap_skeleton
from qmk_compiler import compile_json, redis, ping
from update_kb_redis import update_kb_redis
if exists('version.txt'):
with open('version.txt') as version_file:
__VERSION__ = version_file.read()
else:
__VERSION__ = '__UNKNOWN__'
UPDATE_API = environ.get('UPDATE_API', 'false') == 'true' # Whether or not the /update route is enabled
CHECK_TIMEOUT = environ.get('CHECK_TIMEOUT', 300) # How long the checks need to fail before we are degraded
KEYMAP_JSON_DOCUMENTATION = """This file is a configurator export. It can be used directly with QMK's source code.
To setup your QMK environment check out the tutorial: https://docs.qmk.fm/#/newbs
You can convert this file to a keymap.c using this command: `qmk json2c %(keyboard)s_%(keymap)s.json`
You can compile this keymap using this command: `qmk compile %(keyboard)s_%(keymap)s.json`"""
## Classes
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, Decimal):
if obj % 2 in (Decimal(0), Decimal(1)):
return int(obj)
return float(obj)
except TypeError:
pass
return JSONEncoder.default(self, obj)
# Useful objects
app = Flask(__name__)
app.json_encoder = CustomJSONEncoder
app.config['JSON_SORT_KEYS'] = False
cache_dir = 'kle_cache'
gist_url = 'https://api.github.com/gists/%s'
cors = CORS(app, resources={'/v*/*': {'origins': '*'}})
rq = Queue(connection=redis)
api_status = {
'last_ping': qmk_redis.get('qmk_api_last_ping'),
'queue_length': len(rq),
'status': 'starting',
'version': __VERSION__,
}
## Helper functions
def check_pings():
"""Check the ping values and update api_status with them.
"""
api_status['queue_length'] = len(rq)
for redis_key in ('qmk_api_last_ping', 'qmk_api_tasks_ping'):
key = redis_key.replace('qmk_api_', '')
value = qmk_redis.get(redis_key)
api_status[key] = value
if value:
if time() - float(value) > CHECK_TIMEOUT:
api_status['status'] = 'degraded'
api_status['status_%s' % key] = 'degraded'
else:
api_status['status'] = 'running'
api_status['status_%s' % key] = 'good'
else:
api_status['status'] = 'degraded'
api_status['status_%s' % key] = 'degraded'
def client_ip():
"""Returns the client's IP address.
"""
return request.headers.get('X-Forwarded-For', request.remote_addr)
def error(message, code=400, **kwargs):
"""Return a structured JSON error message.
"""
kwargs['message'] = message
return jsonify(kwargs), code
def get_job_metadata(job_id):
"""Fetch a job's metadata from the file store.
"""
json_text = qmk_storage.get('%s/%s.json' % (job_id, job_id))
return json.loads(json_text)
def fetch_kle_json(gist_id):
"""Returns the JSON for a keyboard-layout-editor URL.
"""
cache_file = '/'.join((cache_dir, gist_id))
headers = {}
if exists(cache_file):
# We have a cached copy
file_stat = stat(cache_file)
file_age = time() - file_stat.st_mtime
if file_stat.st_size == 0:
logging.warning('Removing zero-length cache file %s', cache_file)
remove(cache_file)
elif file_age < 30:
logging.info('Using cache file %s (%s < 30)', cache_file, file_age)
return copen(cache_file, encoding='UTF-8').read()
else:
headers['If-Modified-Since'] = strftime('%a, %d %b %Y %H:%M:%S %Z', localtime(file_stat.st_mtime))
logging.warning('Adding If-Modified-Since: %s to headers.', headers['If-Modified-Since'])
keyboard = requests.get(gist_url % gist_id, headers=headers)
if keyboard.status_code == 304:
logging.debug("Source for %s hasn't changed, loading from disk.", cache_file)
return copen(cache_file, encoding='UTF-8').read()
keyboard = keyboard.json()
for file in keyboard['files']:
keyboard_text = keyboard['files'][file]['content']
break # First file wins, hope there's only one...
if not exists(cache_dir):
makedirs(cache_dir)
with copen(cache_file, 'w', encoding='UTF-8') as fd:
fd.write(keyboard_text) # Write this to a cache file
return keyboard_text
def kle_to_qmk(kle):
"""Convert a kle layout to qmk's layout format.
"""
layout = []
for row in kle:
for key in row:
if key['decal']:
continue
qmk_key = OrderedDict(
label="",
x=key['column'],
y=key['row'],
)
if key['width'] != 1:
qmk_key['w'] = key['width']
if key['height'] != 1:
qmk_key['h'] = key['height']
if 'name' in key and key['name']:
qmk_key['label'] = key['name'].split('\n', 1)[0]
else:
del (qmk_key['label'])
layout.append(qmk_key)
return layout
## Views
@app.route('/', methods=['GET'])
def root():
"""Serve up the documentation for this API.
"""
return redirect('https://docs.qmk.fm/#/api_docs')
@app.route('/v1', methods=['GET'])
def GET_v1():
"""Return the API's status.
"""
check_pings()
return jsonify({'children': ['compile', 'converters', 'keyboards', 'skeletons'], **api_status})
@app.route('/v1/healthcheck', methods=['GET'])
def GET_v1_healthcheck():
"""Checks over the health of the API.
Note: This is used for operational purposes. Please don't hit it on the
live api.qmk.fm site without talking to us first. Most of this
information is available at the /v1 endpoint as well.
"""
rq.enqueue(ping, at_front=True)
check_pings()
return jsonify(api_status)
@app.route('/v1/update', methods=['GET'])
def GET_v1_update():
"""Triggers an update of the API.
"""
check_pings()
if UPDATE_API:
rq.enqueue(update_kb_redis)
return jsonify({'result': UPDATE_API, **api_status})
@app.route('/v1/converters', methods=['GET'])
def GET_v1_converters():
"""Return the list of converters we support.
"""
return jsonify({'children': ['kle']})
@app.route('/v1/converters/kle2qmk', methods=['POST'])
@app.route('/v1/converters/kle', methods=['POST'])
def POST_v1_converters_kle():
"""Convert a KLE layout to QMK's layout format.
"""
data = request.get_json(force=True)
if not data:
return error("Invalid JSON data!")
if 'id' in data:
gist_id = data['id'].split('/')[-1]
raw_code = fetch_kle_json(gist_id)[1:-1]
elif 'raw' in data:
raw_code = data['raw']
else:
return error('You must supply either "id" or "raw" labels.')
try:
kle = KLE2xy(raw_code)
except Exception as e:
logging.error('Could not parse KLE raw data: %s', raw_code)
logging.exception(e)
return error('Could not parse KLE raw data.') # FIXME: This should be better
keyboard = OrderedDict(
keyboard_name=kle.name,
url='',
maintainer='qmk',
width=kle.columns,
height=kle.rows,
layouts={'LAYOUT': {
'layout': 'LAYOUT_JSON_HERE'
}},
)
keyboard = json.dumps(keyboard, indent=4, separators=(', ', ': '), sort_keys=False, cls=CustomJSONEncoder)
layout = json.dumps(kle_to_qmk(kle), separators=(', ', ':'), cls=CustomJSONEncoder)
keyboard = keyboard.replace('"LAYOUT_JSON_HERE"', layout)
response = make_response(keyboard)
response.mimetype = app.config['JSONIFY_MIMETYPE']
return response
@app.route('/v1/keyboards', methods=['GET'])
def GET_v1_keyboards():
"""Return a list of keyboards
"""
json_blob = qmk_redis.get('qmk_api_keyboards')
return jsonify(json_blob)
@app.route('/v1/keyboards/all', methods=['GET'])
def GET_v1_keyboards_all():
"""Return JSON showing all available keyboards and their layouts.
"""
allkb = qmk_redis.get('qmk_api_kb_all')
if allkb:
return jsonify(allkb)
    return error('An unknown error occurred', 500)
@app.route('/v1/keyboards/<path:keyboard>', methods=['GET'])
def GET_v1_keyboards_keyboard(keyboard):
"""Return JSON showing data about a keyboard
"""
keyboards = qmk_redis.get('qmk_api_last_updated')
keyboards['keyboards'] = {}
for kb in keyboard.split(','):
kb_data = qmk_redis.get('qmk_api_kb_' + kb)
if kb_data:
keyboards['keyboards'][kb] = kb_data
if not keyboards['keyboards']:
return error('No such keyboard: ' + keyboard, 404)
return jsonify(keyboards)
@app.route('/v1/keyboards/<path:keyboard>/readme', methods=['GET'])
def GET_v1_keyboards_keyboard_readme(keyboard):
"""Returns the readme for a keyboard.
"""
readme = qmk_redis.get('qmk_api_kb_%s_readme' % (keyboard))
response = make_response(readme)
response.mimetype = 'text/markdown'
return response
@app.route('/v1/keyboards/<path:keyboard>/keymaps/<string:keymap>', methods=['GET'])
def GET_v1_keyboards_keyboard_keymaps_keymap(keyboard, keymap):
"""Return JSON showing data about a keyboard's keymap
Deprecated because it's unused and takes up valuable memory and processing time.
"""
return error('No such keymap: ' + keymap, 404)
@app.route('/v1/keyboards/<path:keyboard>/keymaps/<string:keymap>/readme', methods=['GET'])
def GET_v1_keyboards_keyboard_keymaps_keymap_readme(keyboard, keymap):
"""Returns the readme for a keymap.
Deprecated because it's unused and takes up valuable memory and processing time.
"""
return error('No such keymap: ' + keymap, 404)
@app.route('/v1/keyboards/build_status', methods=['GET'])
def GET_v1_keyboards_build_status():
"""Returns a dictionary of keyboard/layout pairs. Each entry is True if the keyboard works in configurator and
false if it doesn't.
"""
json_blob = qmk_redis.get('qmk_api_keyboards_tested')
return jsonify(json_blob)
@app.route('/v1/keyboards/build_log', methods=['GET'])
def GET_v1_keyboards_build_log():
"""Returns a dictionary of keyboard/layout pairs. Each entry is a dictionary with the following keys:
* `works`: Boolean indicating whether the compile was successful
* `message`: The compile output for failed builds
"""
json_data = qmk_redis.get('qmk_api_configurator_status')
return jsonify(json_data)
@app.route('/v1/keyboards/error_log', methods=['GET'])
def GET_v1_keyboards_error_log():
"""Return the error log from the last run.
"""
json_blob = qmk_redis.get('qmk_api_update_error_log')
return jsonify(json_blob)
@app.route('/v1/usb', methods=['GET'])
def GET_v1_usb():
"""Returns the list of USB device identifiers used in QMK.
"""
json_blob = qmk_redis.get('qmk_api_usb_list')
return jsonify(json_blob)
@app.route('/v1/skeletons', methods=['GET'])
def GET_v1_skeletons():
"""Return the list of available skeletons.
"""
return jsonify({'children': ['keymap']})
@app.route('/v1/skeletons/keymap', methods=['GET'])
def GET_v1_skeletons_keymap():
"""Returns a keymap skeleton.
"""
return jsonify(keymap_skeleton())
@app.route('/v1/compile', methods=['POST'])
def POST_v1_compile():
"""Enqueue a compile job.
"""
data = request.get_json(force=True)
if not data:
return error("Invalid JSON data!")
if '.' in data['keyboard'] or '/' in data['keymap']:
return error("Buzz off hacker.", 422)
bad_keys = []
for key in ('keyboard', 'keymap', 'layout', 'layers'):
if key not in data:
bad_keys.append(key)
if bad_keys:
return error("Invalid or missing keys: %s" % (', '.join(bad_keys),))
if 'documentation' not in data:
data['documentation'] = KEYMAP_JSON_DOCUMENTATION % data
job = compile_json.delay(data, client_ip())
return jsonify({'enqueued': True, 'job_id': job.id})
@app.route('/v1/compile/<string:job_id>', methods=['GET'])
def GET_v1_compile_job_id(job_id):
"""Fetch the status of a compile job.
"""
# Check redis first.
job = rq.fetch_job(job_id)
if job:
if job.is_finished:
status = 'finished'
elif job.is_queued:
status = 'queued'
elif job.is_started:
status = 'running'
elif job.is_failed:
status = 'failed'
else:
logging.error('Unknown job status!')
status = 'unknown'
return jsonify({
'created_at': job.created_at,
'enqueued_at': job.enqueued_at,
'id': job.id,
'is_failed': job.is_failed or (job.result and job.result.get('returncode') != 0),
'status': status,
'result': job.result,
})
# Check for cached json if it's not in redis
job = get_job_metadata(job_id)
if job:
return jsonify(job)
# Couldn't find it
return error("Compile job not found", 404)
@app.route('/v1/compile/<string:job_id>/download', methods=['GET'])
@app.route('/v1/compile/<string:job_id>/hex', methods=['GET'])
def GET_v1_compile_job_id_bin(job_id):
"""Download a compiled firmware.
New clients should prefer the `/download` URL. `/hex` is deprecated and will be removed in a future version.
"""
job = get_job_metadata(job_id)
if not job:
return error("Compile job not found", 404)
return redirect(qmk_storage.get_public_url('%(id)s/%(firmware_filename)s' % job['result']))
@app.route('/v1/compile/<string:job_id>/keymap', methods=['GET'])
def GET_v1_compile_job_id_keymap(job_id):
"""Download the keymap for a completed compile job.
"""
job = get_job_metadata(job_id)
if not job:
return error("Compile job not found", 404)
return redirect(qmk_storage.get_public_url('%(id)s/%(keymap_archive)s' % job['result']))
@app.route('/v1/compile/<string:job_id>/source', methods=['GET'])
def GET_v1_compile_job_id_src(job_id):
"""Download the full source for a completed compile job.
"""
job = get_job_metadata(job_id)
if not job:
return error("Compile job not found", 404)
return redirect(qmk_storage.get_public_url('%(id)s/%(source_archive)s' % job['result']))
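# Illustrative client-side sketch of the compile flow above. The keyboard, layout
# macro and keycodes are placeholders, not known-good QMK values, and the base URL
# assumes the default Flask development port.
def _example_compile_request(base_url='http://localhost:5000'):
    payload = {
        'keyboard': 'handwired/example',
        'keymap': 'example_map',
        'layout': 'LAYOUT',
        'layers': [['KC_A', 'KC_B']],
    }
    job = requests.post(base_url + '/v1/compile', json=payload).json()
    return requests.get('%s/v1/compile/%s' % (base_url, job['job_id'])).json()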
if __name__ == '__main__':
# Start the webserver
app.run(debug=True)
|
py | b40a4f927da0f72af2510e33c8d2c95118a595db | #
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Test reads single characters from stdio and measures time between occurrences."""
from .. import BaseHostTest
class WaitusTest(BaseHostTest):
"""Test ticker timing."""
__result = None
DEVIATION = 0.10 # +/-10%
ticks = []
def _callback_exit(self, key, value, timeout):
self.notify_complete()
def _callback_tick(self, key, value, timestamp):
"""{{tick;%d}}}."""
self.log("tick! " + str(timestamp))
self.ticks.append((key, value, timestamp))
def setup(self):
"""Set up the test case."""
self.register_callback("exit", self._callback_exit)
self.register_callback("tick", self._callback_tick)
def result(self):
"""Report test result."""
def sub_timestamps(t1, t2):
delta = t1 - t2
deviation = abs(delta - 1.0)
# return True if delta > 0 and deviation <= self.DEVIATION else False
return deviation <= self.DEVIATION
# Check if time between ticks was accurate
if self.ticks:
# If any ticks were recorded
timestamps = [timestamp for _, _, timestamp in self.ticks]
self.log(str(timestamps))
            m = list(map(sub_timestamps, timestamps[1:], timestamps[:-1]))
self.log(str(m))
self.__result = all(m)
else:
self.__result = False
return self.__result
def teardown(self):
"""Tear down test."""
pass
|
py | b40a5050f43243fda55dfb9172ab603dd87263e9 | import aiohttp
import asyncio
import functools
import json
import time
from pprint import pprint
from typing import List, Dict, Optional, Callable
from chia.cmds.wallet_funcs import print_balance, wallet_coin_unit
from chia.pools.pool_wallet_info import PoolWalletInfo, PoolSingletonState
from chia.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from chia.rpc.farmer_rpc_client import FarmerRpcClient
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.bech32m import encode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint16, uint32
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.wallet_types import WalletType
async def create_pool_args(pool_url: str) -> Dict:
try:
async with aiohttp.ClientSession() as session:
async with session.get(f"{pool_url}/pool_info") as response:
if response.ok:
json_dict = json.loads(await response.text())
else:
raise ValueError(f"Response from {pool_url} not OK: {response.status}")
except Exception as e:
raise ValueError(f"Error connecting to pool {pool_url}: {e}")
if json_dict["relative_lock_height"] > 1000:
raise ValueError("Relative lock height too high for this pool, cannot join")
if json_dict["protocol_version"] != POOL_PROTOCOL_VERSION:
raise ValueError(f"Incorrect version: {json_dict['protocol_version']}, should be {POOL_PROTOCOL_VERSION}")
header_msg = f"\n---- Pool parameters fetched from {pool_url} ----"
print(header_msg)
pprint(json_dict)
print("-" * len(header_msg))
return json_dict
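# Illustrative use of create_pool_args(); the URL is a placeholder for any pool
# exposing GET /pool_info with a matching protocol version.
#   import asyncio
#   info = asyncio.run(create_pool_args("https://pool.example.com"))
#   print(info["relative_lock_height"], info["target_puzzle_hash"])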
async def create(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
state = args["state"]
prompt = not args.get("yes", False)
# Could use initial_pool_state_from_dict to simplify
if state == "SELF_POOLING":
pool_url: Optional[str] = None
relative_lock_height = uint32(0)
target_puzzle_hash = None # wallet will fill this in
elif state == "FARMING_TO_POOL":
pool_url = str(args["pool_url"])
json_dict = await create_pool_args(pool_url)
relative_lock_height = json_dict["relative_lock_height"]
target_puzzle_hash = hexstr_to_bytes(json_dict["target_puzzle_hash"])
else:
raise ValueError("Plot NFT must be created in SELF_POOLING or FARMING_TO_POOL state.")
pool_msg = f" and join pool: {pool_url}" if pool_url else ""
print(f"Will create a plot NFT{pool_msg}.")
if prompt:
user_input: str = input("Confirm [n]/y: ")
else:
user_input = "yes"
if user_input.lower() == "y" or user_input.lower() == "yes":
try:
tx_record: TransactionRecord = await wallet_client.create_new_pool_wallet(
target_puzzle_hash,
pool_url,
relative_lock_height,
"localhost:5000",
"new",
state,
)
start = time.time()
while time.time() - start < 10:
await asyncio.sleep(0.1)
tx = await wallet_client.get_transaction(str(1), tx_record.name)
if len(tx.sent_to) > 0:
print(f"Transaction submitted to nodes: {tx.sent_to}")
print(f"Do chia wallet get_transaction -f {fingerprint} -tx 0x{tx_record.name} to get status")
return None
except Exception as e:
print(f"Error creating plot NFT: {e}")
return
print("Aborting.")
async def pprint_pool_wallet_state(
wallet_client: WalletRpcClient,
wallet_id: int,
pool_wallet_info: PoolWalletInfo,
address_prefix: str,
pool_state_dict: Dict,
unconfirmed_transactions: List[TransactionRecord],
):
if pool_wallet_info.current.state == PoolSingletonState.LEAVING_POOL and pool_wallet_info.target is None:
expected_leave_height = pool_wallet_info.singleton_block_height + pool_wallet_info.current.relative_lock_height
print(f"Current state: INVALID_STATE. Please leave/join again after block height {expected_leave_height}")
else:
print(f"Current state: {PoolSingletonState(pool_wallet_info.current.state).name}")
print(f"Current state from block height: {pool_wallet_info.singleton_block_height}")
print(f"Launcher ID: {pool_wallet_info.launcher_id}")
print(
"Target address (not for plotting): "
f"{encode_puzzle_hash(pool_wallet_info.current.target_puzzle_hash, address_prefix)}"
)
print(f"Owner public key: {pool_wallet_info.current.owner_pubkey}")
print(
f"P2 singleton address (pool contract address for plotting): "
f"{encode_puzzle_hash(pool_wallet_info.p2_singleton_puzzle_hash, address_prefix)}"
)
if pool_wallet_info.target is not None:
print(f"Target state: {PoolSingletonState(pool_wallet_info.target.state).name}")
print(f"Target pool URL: {pool_wallet_info.target.pool_url}")
if pool_wallet_info.current.state == PoolSingletonState.SELF_POOLING.value:
balances: Dict = await wallet_client.get_wallet_balance(str(wallet_id))
balance = balances["confirmed_wallet_balance"]
typ = WalletType(int(WalletType.POOLING_WALLET))
address_prefix, scale = wallet_coin_unit(typ, address_prefix)
print(f"Claimable balance: {print_balance(balance, scale, address_prefix)}")
if pool_wallet_info.current.state == PoolSingletonState.FARMING_TO_POOL:
print(f"Current pool URL: {pool_wallet_info.current.pool_url}")
if pool_wallet_info.launcher_id in pool_state_dict:
print(f"Current difficulty: {pool_state_dict[pool_wallet_info.launcher_id]['current_difficulty']}")
print(f"Points balance: {pool_state_dict[pool_wallet_info.launcher_id]['current_points']}")
print(f"Relative lock height: {pool_wallet_info.current.relative_lock_height} blocks")
payout_instructions: str = pool_state_dict[pool_wallet_info.launcher_id]["pool_config"]["payout_instructions"]
try:
payout_address = encode_puzzle_hash(bytes32.fromhex(payout_instructions), address_prefix)
print(f"Payout instructions (pool will pay to this address): {payout_address}")
except Exception:
print(f"Payout instructions (pool will pay you with this): {payout_instructions}")
if pool_wallet_info.current.state == PoolSingletonState.LEAVING_POOL:
expected_leave_height = pool_wallet_info.singleton_block_height + pool_wallet_info.current.relative_lock_height
if pool_wallet_info.target is not None:
print(f"Expected to leave after block height: {expected_leave_height}")
async def show(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
summaries_response = await wallet_client.get_wallets()
wallet_id_passed_in = args.get("id", None)
try:
pool_state_list: List = (await farmer_client.get_pool_state())["pool_state"]
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(
f"Connection error. Check if farmer is running at {farmer_rpc_port}."
f" You can run the farmer by:\n chia start farmer-only"
)
else:
print(f"Exception from 'wallet' {e}")
farmer_client.close()
await farmer_client.await_closed()
return
pool_state_dict: Dict[bytes32, Dict] = {
hexstr_to_bytes(pool_state_item["pool_config"]["launcher_id"]): pool_state_item
for pool_state_item in pool_state_list
}
if wallet_id_passed_in is not None:
for summary in summaries_response:
typ = WalletType(int(summary["type"]))
if summary["id"] == wallet_id_passed_in and typ != WalletType.POOLING_WALLET:
print(f"Wallet with id: {wallet_id_passed_in} is not a pooling wallet. Please provide a different id.")
return
pool_wallet_info, unconfirmed_transactions = await wallet_client.pw_status(wallet_id_passed_in)
await pprint_pool_wallet_state(
wallet_client,
wallet_id_passed_in,
pool_wallet_info,
address_prefix,
pool_state_dict,
unconfirmed_transactions,
)
else:
print(f"Wallet height: {await wallet_client.get_height_info()}")
print(f"Sync status: {'Synced' if (await wallet_client.get_synced()) else 'Not synced'}")
for summary in summaries_response:
wallet_id = summary["id"]
typ = WalletType(int(summary["type"]))
if typ == WalletType.POOLING_WALLET:
print(f"Wallet id {wallet_id}: ")
pool_wallet_info, unconfirmed_transactions = await wallet_client.pw_status(wallet_id)
await pprint_pool_wallet_state(
wallet_client,
wallet_id,
pool_wallet_info,
address_prefix,
pool_state_dict,
unconfirmed_transactions,
)
print("")
farmer_client.close()
await farmer_client.await_closed()
async def get_login_link(launcher_id_str: str) -> None:
launcher_id: bytes32 = hexstr_to_bytes(launcher_id_str)
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
try:
login_link: Optional[str] = await farmer_client.get_pool_login_link(launcher_id)
if login_link is None:
print("Was not able to get login link.")
else:
print(login_link)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(
f"Connection error. Check if farmer is running at {farmer_rpc_port}."
f" You can run the farmer by:\n chia start farmer-only"
)
else:
print(f"Exception from 'farmer' {e}")
finally:
farmer_client.close()
await farmer_client.await_closed()
async def submit_tx_with_confirmation(
message: str, prompt: bool, func: Callable, wallet_client: WalletRpcClient, fingerprint: int, wallet_id: int
):
print(message)
if prompt:
user_input: str = input("Confirm [n]/y: ")
else:
user_input = "yes"
if user_input.lower() == "y" or user_input.lower() == "yes":
try:
tx_record: TransactionRecord = await func()
start = time.time()
while time.time() - start < 10:
await asyncio.sleep(0.1)
tx = await wallet_client.get_transaction(str(1), tx_record.name)
if len(tx.sent_to) > 0:
print(f"Transaction submitted to nodes: {tx.sent_to}")
print(f"Do chia wallet get_transaction -f {fingerprint} -tx 0x{tx_record.name} to get status")
return None
except Exception as e:
print(f"Error performing operation on Plot NFT -f {fingerprint} wallet id: {wallet_id}: {e}")
return
print("Aborting.")
async def join_pool(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
pool_url = args["pool_url"]
wallet_id = args.get("id", None)
prompt = not args.get("yes", False)
try:
async with aiohttp.ClientSession() as session:
async with session.get(f"{pool_url}/pool_info") as response:
if response.ok:
json_dict = json.loads(await response.text())
else:
print(f"Response not OK: {response.status}")
return
except Exception as e:
print(f"Error connecting to pool {pool_url}: {e}")
return
if json_dict["relative_lock_height"] > 1000:
print("Relative lock height too high for this pool, cannot join")
return
if json_dict["protocol_version"] != POOL_PROTOCOL_VERSION:
print(f"Incorrect version: {json_dict['protocol_version']}, should be {POOL_PROTOCOL_VERSION}")
return
pprint(json_dict)
msg = f"\nWill join pool: {pool_url} with Plot NFT {fingerprint}."
func = functools.partial(
wallet_client.pw_join_pool,
wallet_id,
hexstr_to_bytes(json_dict["target_puzzle_hash"]),
pool_url,
json_dict["relative_lock_height"],
)
await submit_tx_with_confirmation(msg, prompt, func, wallet_client, fingerprint, wallet_id)
async def self_pool(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args.get("id", None)
prompt = not args.get("yes", False)
msg = f"Will start self-farming with Plot NFT on wallet id {wallet_id} fingerprint {fingerprint}."
func = functools.partial(wallet_client.pw_self_pool, wallet_id)
await submit_tx_with_confirmation(msg, prompt, func, wallet_client, fingerprint, wallet_id)
async def inspect_cmd(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args.get("id", None)
pool_wallet_info, unconfirmed_transactions = await wallet_client.pw_status(wallet_id)
print(
{
"pool_wallet_info": pool_wallet_info,
"unconfirmed_transactions": [
{"sent_to": tx.sent_to, "transaction_id": tx.name.hex()} for tx in unconfirmed_transactions
],
}
)
async def claim_cmd(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args.get("id", None)
msg = f"\nWill claim rewards for wallet ID: {wallet_id}."
func = functools.partial(
wallet_client.pw_absorb_rewards,
wallet_id,
)
await submit_tx_with_confirmation(msg, False, func, wallet_client, fingerprint, wallet_id)
|
py | b40a5076c84f2ba2cdc7ebd791de75fa051c19ec | from weakref import WeakKeyDictionary
import random
from animation.animation_utils import flush_all_animations
from cas.cas import get_caspart_bodytype
from element_utils import build_critical_section
from event_testing.resolver import SingleSimResolver
from event_testing.tests import TunableGlobalTestSet
from interactions import ParticipantType, ParticipantTypeSingle
from interactions.liability import Liability
from sims.outfits import outfit_utils
from sims.outfits.outfit_enums import OutfitCategory, OutfitFilterFlag, SpecialOutfitIndex, OutfitChangeReason, CLOTHING_BODY_TYPES, REGULAR_OUTFIT_CATEGORIES
from sims.outfits.outfit_generator import TunableOutfitGeneratorSnippet
from sims.outfits.outfit_utils import get_maximum_outfits_for_category
from sims4.tuning.tunable import AutoFactoryInit, HasTunableSingletonFactory, TunableVariant, OptionalTunable, TunableEnumEntry, HasTunableFactory, TunableTuple, Tunable, TunableList
from singletons import DEFAULT
import services
import sims4.log
logger = sims4.log.Logger('Outfits', default_owner='rfleig')
SPECIAL_OUTFIT_KEY = (OutfitCategory.SPECIAL, 0)
class OutfitChangeBase(HasTunableSingletonFactory, AutoFactoryInit):
def __bool__(self):
return True
def has_entry_change(self, interaction, **kwargs):
raise NotImplementedError
def has_exit_change(self, interaction, **kwargs):
raise NotImplementedError
def get_on_entry_change(self, interaction, **kwargs):
raise NotImplementedError
def get_on_exit_change(self, interaction, **kwargs):
raise NotImplementedError
def get_on_entry_outfit(self, interaction, **kwargs):
raise NotImplementedError
def get_on_exit_outfit(self, interaction, **kwargs):
raise NotImplementedError
class TunableOutfitChange(TunableVariant):
class _OutfitChangeNone(OutfitChangeBase):
def __bool__(self):
return False
def has_entry_change(self, interaction, **kwargs):
return False
def has_exit_change(self, interaction, **kwargs):
return False
def get_on_entry_change(self, interaction, **kwargs):
pass
def get_on_exit_change(self, interaction, **kwargs):
pass
def get_on_entry_outfit(self, interaction, **kwargs):
pass
def get_on_exit_outfit(self, interaction, **kwargs):
pass
class _OutfitChangeForReason(OutfitChangeBase):
FACTORY_TUNABLES = {'on_entry': OptionalTunable(description='\n When enabled, define the change reason to apply on posture\n entry.\n ', tunable=TunableEnumEntry(tunable_type=OutfitChangeReason, default=OutfitChangeReason.Invalid)), 'on_exit': OptionalTunable(description='\n When enabled, define the change reason to apply on posture\n exit.\n ', tunable=TunableEnumEntry(tunable_type=OutfitChangeReason, default=OutfitChangeReason.Invalid))}
def _get_outfit_resolver_and_sim_info(self, interaction, sim_info=DEFAULT):
if sim_info is DEFAULT:
return (None, interaction.sim.sim_info)
return (SingleSimResolver(sim_info), sim_info)
def has_entry_change(self, interaction, **kwargs):
return self.on_entry is not None
def has_exit_change(self, interaction, **kwargs):
return self.on_exit is not None
def get_on_entry_change(self, interaction, sim_info=DEFAULT, **kwargs):
(resolver, sim_info) = self._get_outfit_resolver_and_sim_info(interaction, sim_info=sim_info)
return sim_info.get_outfit_change(interaction, self.on_entry, resolver=resolver, **kwargs)
def get_on_exit_change(self, interaction, sim_info=DEFAULT, **kwargs):
(resolver, sim_info) = self._get_outfit_resolver_and_sim_info(interaction, sim_info=sim_info)
return sim_info.get_outfit_change(interaction, self.on_exit, resolver=resolver, **kwargs)
def get_on_entry_outfit(self, interaction, sim_info=DEFAULT):
if self.on_entry is not None:
(resolver, sim_info) = self._get_outfit_resolver_and_sim_info(interaction, sim_info=sim_info)
return sim_info.get_outfit_for_clothing_change(interaction, self.on_entry, resolver=resolver)
def get_on_exit_outfit(self, interaction, sim_info=DEFAULT):
if self.on_exit is not None:
(resolver, sim_info) = self._get_outfit_resolver_and_sim_info(interaction, sim_info=sim_info)
return sim_info.get_outfit_for_clothing_change(interaction, self.on_exit, resolver=resolver)
class _OutfitChangeForTags(OutfitChangeBase):
@staticmethod
def _verify_tunable_callback(instance_class, tunable_name, source, value, **kwargs):
if value.on_entry and value.on_entry.auto_undo_on_exit and value.on_exit is not None:
logger.error('{} has tuned both on_entry.auto_undo_on_exit and on_exit in a For Tags outfit change. These two things conflict.', instance_class, owner='rfleig')
class OutfitTypeSpecial(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'special_outfit_index': TunableEnumEntry(description='\n The Special outfit index to use when creating the outfit using\n the provided flags. There are multiple Special outfits that \n are indexed by the entries in the SpecialOutfitIndex enum.\n \n GPE NOTE:\n If you want to add a new index you will need to add a value\n to SpecialOutfitIndex as well as change the values in \n outfit_tuning.py and OutfitTypes.h to allow for more special\n outfits.\n ', tunable_type=SpecialOutfitIndex, default=SpecialOutfitIndex.DEFAULT)}
def get_outfit(self, *args):
return (OutfitCategory.SPECIAL, self.special_outfit_index)
def __call__(self, sim_info, outfit_generator):
if self.special_outfit_index > 0:
for i in range(0, self.special_outfit_index):
if not sim_info.has_outfit((OutfitCategory.SPECIAL, i)):
sim_info.generate_outfit(OutfitCategory.SPECIAL, i)
outfit_generator(sim_info, OutfitCategory.SPECIAL, outfit_index=self.special_outfit_index)
return (OutfitCategory.SPECIAL, self.special_outfit_index)
class OutfitTypeCurrent(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'restrict_to_regular': Tunable(description='\n If checked, the Sim will switch out of any non-regular\n outfits (and into Everyday) before applying the\n modification.\n \n If this is unchecked, the Sim will modify whatever outfit\n they are wearing, including, for example, career outfits.\n The modification is permanent.\n ', tunable_type=bool, default=True)}
def get_outfit(self, sim_info, *args):
if sim_info is not DEFAULT:
(outfit_category, outfit_index) = sim_info.get_current_outfit()
else:
outfit_category = OutfitCategory.SPECIAL
outfit_index = SpecialOutfitIndex.DEFAULT
if self.restrict_to_regular:
if outfit_category not in REGULAR_OUTFIT_CATEGORIES:
outfit_category = OutfitCategory.EVERYDAY
outfit_index = 0
return (outfit_category, outfit_index)
def __call__(self, sim_info, outfit_generator):
(outfit_category, outfit_index) = self.get_outfit(sim_info)
outfit_generator(sim_info, outfit_category, outfit_index=outfit_index)
return (outfit_category, outfit_index)
class OutfitTypeCategory(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'outfit_category': TunableEnumEntry(description='\n Outfit Category\n ', tunable_type=OutfitCategory, default=OutfitCategory.EVERYDAY, invalid_enums=(OutfitCategory.CURRENT_OUTFIT,))}
def get_outfit(self, *args):
return (self.outfit_category, 0)
def __call__(self, sim_info, outfit_generator):
outfit_generator(sim_info, self.outfit_category)
return (self.outfit_category, 0)
FACTORY_TUNABLES = {'on_entry': OptionalTunable(description='\n The tuning for how to handle the outfit change on entry of\n the new context.\n ', tunable=TunableTuple(description='\n Contains the tags used to tune the outfit and also\n a preference for whether or not to automatically switch out\n of the tags outfit when on exit.\n ', outfit_to_modify=TunableVariant(description='\n The outfit we want to generate over.\n ', current=OutfitTypeCurrent.TunableFactory(), outfit_category=OutfitTypeCategory.TunableFactory(), special=OutfitTypeSpecial.TunableFactory(), default='special'), generator=TunableOutfitGeneratorSnippet(), do_spin=Tunable(description='\n If checked, the Sim will animate and perform a clothing\n change spin. If unchecked, the Sim will change outfits\n without animating.\n ', tunable_type=bool, default=True), auto_undo_on_exit=Tunable(description="\n If True then the Sim will switch out of the entry tag\n outfit on exit. \n If False then the Sim will stay in the tag outfit.\n \n NOTE: This tuning conflicts with the On Exit tuning. If\n this is set to true and On Exit is enabled then an \n error should occur on load because you can't both switch\n out of the tag outfit and switch into a different tag\n outfit.\n ", tunable_type=bool, default=True)), enabled_by_default=True), 'on_exit': OptionalTunable(description='\n The clothing change that happens on exit of the current context.\n ', tunable=TunableList(description='\n A list of (tests, clothing change) tuples. The first entry\n that passes all of its tests will be used while the other\n entries after that one will be ignored. So the order of the \n list is essentially priority.\n ', tunable=TunableTuple(description='\n A tuple of clothing changes and tests for whether they\n should happen or not.\n ', outfit_to_modify=TunableVariant(description='\n The outfit we want to generate over.\n ', current=OutfitTypeCurrent.TunableFactory(), outfit_category=OutfitTypeCategory.TunableFactory(), special=OutfitTypeSpecial.TunableFactory(), default='special'), generator=TunableOutfitGeneratorSnippet(), tests=TunableGlobalTestSet(description='\n Tests to run when deciding which clothing change\n entry to use. All of the tests must pass in order \n for the item to pass.\n ')))), 'verify_tunable_callback': _verify_tunable_callback}
def has_entry_change(self, interaction, **kwargs):
return self.on_entry is not None
def has_exit_change(self, interaction, **kwargs):
return self.on_exit is not None
def get_on_entry_change(self, interaction, sim_info=DEFAULT, do_spin=True, **kwargs):
if not self.on_entry:
return
sim_info = interaction.sim.sim_info if sim_info is DEFAULT else sim_info
do_spin &= self.on_entry.do_spin
for trait in sim_info.get_traits():
outfit_change_reason = trait.get_outfit_change_reason(None)
if outfit_change_reason is not None:
return sim_info.get_outfit_change(interaction, outfit_change_reason, do_spin=do_spin, **kwargs)
(category, index) = self.on_entry.outfit_to_modify(sim_info, self.on_entry.generator)
return build_critical_section(sim_info.get_change_outfit_element_and_archive_change_reason((category, index), do_spin=do_spin, interaction=interaction, change_reason=interaction, **kwargs), flush_all_animations)
def get_on_exit_change(self, interaction, sim_info=DEFAULT, **kwargs):
sim_info = interaction.sim.sim_info if sim_info is DEFAULT else sim_info
if not self.on_exit and self.on_entry is not None and self.on_entry.auto_undo_on_exit:
return sim_info.get_outfit_change(interaction, OutfitChangeReason.CurrentOutfit, **kwargs)
if self.on_exit:
choice = self.choose_on_exit_clothing_change(sim_info)
if choice is None:
return
else:
(category, index) = choice.outfit_to_modify(sim_info, choice.generator)
return build_critical_section(sim_info.get_change_outfit_element_and_archive_change_reason((category, index), interaction=interaction, change_reason=interaction, **kwargs), flush_all_animations)
def choose_on_exit_clothing_change(self, sim_info):
resolver = SingleSimResolver(sim_info)
for outfit_change in self.on_exit:
result = outfit_change.tests.run_tests(resolver)
if result:
return outfit_change
def get_on_entry_outfit(self, interaction, sim_info=DEFAULT):
if self.on_entry is not None:
return self.on_entry.outfit_to_modify.get_outfit(sim_info)
def get_on_exit_outfit(self, interaction, sim_info=DEFAULT):
if sim_info is DEFAULT:
sim_info = interaction.sim.sim_info
resolver = None
else:
resolver = SingleSimResolver(sim_info)
if not self.on_exit and self.on_entry is not None and self.on_entry.auto_undo_on_exit:
return sim_info.get_outfit_for_clothing_change(interaction, OutfitChangeReason.CurrentOutfit, resolver=resolver)
if self.on_exit:
choice = self.choose_on_exit_clothing_change(sim_info)
if choice is None:
return
else:
return choice.outfit_to_modify(sim_info, choice.generator)
class _OutfitChangeFromPickedItemId(OutfitChangeBase):
class _OnEntry(HasTunableSingletonFactory):
@property
def is_entry_change(self):
return True
@property
def is_exit_change(self):
return False
class _OnExit(HasTunableSingletonFactory):
@property
def is_entry_change(self):
return False
@property
def is_exit_change(self):
return True
FACTORY_TUNABLES = {'timing': TunableVariant(description='\n Define when this outfit change happens.\n ', on_entry=_OnEntry.TunableFactory(), on_exit=_OnExit.TunableFactory(), default='on_entry')}
def _get_outfit(self, interaction):
outfits = interaction.get_participants(ParticipantType.PickedItemId)
if not outfits:
return
outfit = next(iter(outfits))
return outfit
def _get_outfit_change(self, interaction, sim_info=DEFAULT, **kwargs):
outfit = self._get_outfit(interaction)
if outfit is not None:
sim_info = interaction.sim.sim_info if sim_info is DEFAULT else sim_info
return build_critical_section(sim_info.get_change_outfit_element_and_archive_change_reason(outfit, interaction=interaction, change_reason=interaction, **kwargs), flush_all_animations)
def has_entry_change(self, interaction, **kwargs):
return self.timing.is_entry_change
def has_exit_change(self, interaction, **kwargs):
return self.timing.is_exit_change
def get_on_entry_change(self, *args, **kwargs):
if self.timing.is_entry_change:
return self._get_outfit_change(*args, **kwargs)
def get_on_exit_change(self, *args, **kwargs):
if self.timing.is_exit_change:
return self._get_outfit_change(*args, **kwargs)
def get_on_entry_outfit(self, interaction, sim_info=DEFAULT):
if self.timing.is_entry_change:
return self._get_outfit(interaction)
def get_on_exit_outfit(self, interaction, sim_info=DEFAULT):
if self.timing.is_exit_change:
return self._get_outfit(interaction)
class _OutfitChangeWithState(OutfitChangeBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._outfit_change_map = WeakKeyDictionary()
def has_entry_change(self, interaction, **kwargs):
return self.get_on_entry_outfit(interaction, **kwargs) is not None
def has_exit_change(self, interaction, **kwargs):
return self.get_on_exit_outfit(interaction, **kwargs) is not None
def _get_outfit_change_internal(self, interaction, sim_info):
sim_map = self._outfit_change_map.get(interaction)
if sim_map is None:
sim_map = WeakKeyDictionary()
self._outfit_change_map[interaction] = sim_map
change = sim_map.get(sim_info)
if change is None:
change = self._create_outfit_change_internal(interaction, sim_info)
sim_map[sim_info] = change
return change
def _create_outfit_change_internal(self, interaction, sim_info):
raise NotImplementedError
def get_on_entry_change(self, interaction, sim_info=DEFAULT, **kwargs):
sim_info = interaction.sim.sim_info if sim_info is DEFAULT else sim_info
for trait in sim_info.get_traits():
outfit_change_reason = trait.get_outfit_change_reason(None)
if outfit_change_reason is not None:
return sim_info.get_outfit_change(interaction, outfit_change_reason, **kwargs)
outfit_change = self._get_outfit_change_internal(interaction, sim_info)
if outfit_change is not None:
return build_critical_section(sim_info.get_change_outfit_element_and_archive_change_reason(outfit_change.entry_outfit, interaction=interaction, change_reason=interaction, **kwargs), flush_all_animations)
def get_on_exit_change(self, interaction, sim_info=DEFAULT, **kwargs):
sim_info = interaction.sim.sim_info if sim_info is DEFAULT else sim_info
outfit_change = self._get_outfit_change_internal(interaction, sim_info)
if outfit_change is not None:
return build_critical_section(sim_info.get_change_outfit_element_and_archive_change_reason(outfit_change.exit_outfit, interaction=interaction, change_reason=interaction, **kwargs), flush_all_animations)
def get_on_entry_outfit(self, interaction, sim_info=DEFAULT):
sim_info = interaction.sim.sim_info if sim_info is DEFAULT else sim_info
outfit_change = self._get_outfit_change_internal(interaction, sim_info)
if outfit_change is not None:
return outfit_change.entry_outfit
def get_on_exit_outfit(self, interaction, sim_info=DEFAULT):
sim_info = interaction.sim.sim_info if sim_info is DEFAULT else sim_info
outfit_change = self._get_outfit_change_internal(interaction, sim_info)
if outfit_change is not None:
return outfit_change.exit_outfit
class _OutfitChangeFromZone(_OutfitChangeWithState):
FACTORY_TUNABLES = {'auto_undo_on_exit': Tunable(description='\n If checked, the Sim will use the previous outfit as the\n on_exit outfit for this outfit change.\n \n Has no effect for outfit changes that do not perform an on_exit\n change, such as on_route outfit changes.\n ', tunable_type=bool, default=True)}
def _create_outfit_change_internal(self, interaction, sim_info):
current_outfit = sim_info.get_current_outfit()
self.entry_outfit = None
self.exit_outfit = current_outfit if self.auto_undo_on_exit else None
if sim_info.is_wearing_outfit(SPECIAL_OUTFIT_KEY):
return self
zone_director = services.venue_service().get_zone_director()
if zone_director is None:
return self
(zone_outfit, outfit_key) = zone_director.get_zone_outfit(sim_info)
if zone_outfit is None:
return self
if sim_info.is_wearing_outfit(outfit_key) and outfit_utils.is_sim_info_wearing_all_outfit_parts(sim_info, zone_outfit, outfit_key):
return self
sim_info.generate_merged_outfit(zone_outfit, SPECIAL_OUTFIT_KEY, sim_info.get_current_outfit(), outfit_key)
self.entry_outfit = SPECIAL_OUTFIT_KEY
return self
class _OutfitChangeFromParticipant(_OutfitChangeWithState):
class _OutfitChangeTemporary(HasTunableFactory, AutoFactoryInit):
def __init__(self, sim_info, outfit_source, *args, **kwargs):
super().__init__(*args, **kwargs)
outfits = outfit_source.get_outfits()
source_outfit = outfit_source.get_current_outfit()
sim_info.generate_merged_outfit(outfits.get_sim_info(), SPECIAL_OUTFIT_KEY, sim_info.get_current_outfit(), source_outfit)
self.entry_outfit = SPECIAL_OUTFIT_KEY
self.exit_outfit = sim_info.get_current_outfit()
class _OutfitChangeAddition(HasTunableFactory, AutoFactoryInit):
def __init__(self, sim_info, outfit_source, *args, **kwargs):
super().__init__(*args, **kwargs)
source_outfit = outfit_source.get_current_outfit()
source_category = source_outfit[0]
source_outfits = outfit_source.get_outfits()
target_outfits = sim_info.get_outfits()
outfits_in_category = target_outfits.get_outfits_in_category(source_category)
outfits_in_category = len(outfits_in_category) if outfits_in_category is not None else 0
current_outfit = sim_info.get_current_outfit()
if outfits_in_category >= get_maximum_outfits_for_category(source_category):
available_outfits = [(source_category, index) for index in range(1, outfits_in_category) if (source_category, index) != current_outfit]
destination_outfit = random.choice(available_outfits)
else:
destination_outfit = target_outfits.get_next_outfit_for_category(source_category)
sim_info.generate_merged_outfit(source_outfits.get_sim_info(), destination_outfit, current_outfit, source_outfit)
self.entry_outfit = destination_outfit
self.exit_outfit = None
FACTORY_TUNABLES = {'outfit_participant': TunableEnumEntry(description='\n The Sim or object whose current outfit is going to be\n temporarily applied to the Sim being affected by this change.\n ', tunable_type=ParticipantTypeSingle, default=ParticipantTypeSingle.Object), 'outfit_change_behavior': TunableVariant(description='\n Define how this outfit is to be applied to the Sim.\n ', temporary=_OutfitChangeTemporary.TunableFactory(), addition=_OutfitChangeAddition.TunableFactory(), default='temporary')}
def _create_outfit_change_internal(self, interaction, sim_info):
outfit_participant = interaction.get_participant(self.outfit_participant)
if outfit_participant is None:
return
return self.outfit_change_behavior(sim_info, outfit_participant)
class _OutfitChangeForNew(_OutfitChangeWithState):
class _OutfitChangeGeneration(HasTunableFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'outfit_category': TunableEnumEntry(description="\n The outfit category for which we're creating a new outfit.\n ", tunable_type=OutfitCategory, default=OutfitCategory.EVERYDAY)}
def __init__(self, interaction, sim_info, *args, **kwargs):
super().__init__(*args, **kwargs)
outfits = sim_info.get_outfits()
current_outfit = sim_info.get_current_outfit()
outfits_in_category = outfits.get_outfits_in_category(self.outfit_category)
outfits_in_category = len(outfits_in_category) if outfits_in_category is not None else 0
if outfits_in_category >= get_maximum_outfits_for_category(self.outfit_category):
available_outfits = [(self.outfit_category, index) for index in range(1, outfits_in_category) if (self.outfit_category, index) != current_outfit]
(_, outfit_index) = random.choice(available_outfits)
else:
(_, outfit_index) = outfits.get_next_outfit_for_category(self.outfit_category)
sim_info.generate_outfit(outfit_category=self.outfit_category, outfit_index=outfit_index, filter_flag=OutfitFilterFlag.NONE)
self.entry_outfit = (self.outfit_category, outfit_index)
self.exit_outfit = None
FACTORY_TUNABLES = {'outfit_change_behavior': _OutfitChangeGeneration.TunableFactory()}
def _create_outfit_change_internal(self, interaction, sim_info):
return self.outfit_change_behavior(interaction, sim_info)
def __init__(self, allow_outfit_change=True, **kwargs):
options = {'no_change': TunableOutfitChange._OutfitChangeNone.TunableFactory()}
if allow_outfit_change:
options['for_reason'] = TunableOutfitChange._OutfitChangeForReason.TunableFactory()
options['for_tags'] = TunableOutfitChange._OutfitChangeForTags.TunableFactory()
options['for_new'] = TunableOutfitChange._OutfitChangeForNew.TunableFactory()
options['from_participant'] = TunableOutfitChange._OutfitChangeFromParticipant.TunableFactory()
options['from_zone'] = TunableOutfitChange._OutfitChangeFromZone.TunableFactory()
options['from_picker'] = TunableOutfitChange._OutfitChangeFromPickedItemId.TunableFactory()
kwargs.update(options)
super().__init__(default='no_change', **kwargs)
class InteractionOnRouteOutfitChange(TunableVariant):
def __init__(self, **kwargs):
super().__init__(no_change=TunableOutfitChange._OutfitChangeNone.TunableFactory(), for_reason=TunableOutfitChange._OutfitChangeForReason.TunableFactory(on_entry=TunableEnumEntry(description='\n Define the change reason to apply on\n entry.\n ', tunable_type=OutfitChangeReason, default=OutfitChangeReason.Invalid), locked_args={'on_exit': None}), from_zone=TunableOutfitChange._OutfitChangeFromZone.TunableFactory(locked_args={'auto_undo_on_exit': False}), default='no_change', **kwargs)
class ChangeOutfitLiability(Liability, HasTunableFactory, AutoFactoryInit):
LIABILITY_TOKEN = 'ChangeOutfitLiability'
FACTORY_TUNABLES = {'subject': TunableEnumEntry(description='\n The participant of this interaction that is going to have\n the specified affordance pushed upon them.\n ', tunable_type=ParticipantType, default=ParticipantType.Actor), 'outfit_change': TunableOutfitChange(description='\n The outfit change we want to perform if the interaction does not\n finish naturally.\n ')}
def __init__(self, interaction, *args, **kwargs):
super().__init__(*args, **kwargs)
self._interaction = interaction
def on_add(self, interaction):
self._interaction = interaction
def release(self):
sim = self._interaction.get_participant(self.subject)
outfit = self.outfit_change.get_on_entry_outfit(self._interaction, sim_info=sim.sim_info)
if outfit is None:
outfit = self.outfit_change.get_on_exit_outfit(self._interaction, sim_info=sim.sim_info)
if outfit is not None:
sim.sim_info.set_current_outfit(outfit)
super().release()
|
py | b40a51cc14425c2bd164ff3891c2495fd0bbc1aa | """
Author: Itiel Lopez - [email protected]
Created: 29/07/2021
"""
"""
NOTE:
You'll probably notice that here we do some things that
might be considered unncesary in python. Like, why don't
we just split the asm file into lines and then we scan it?
The thing is, this program is supoused to be exported to C
later. We're just using python first to speed development.
The goal is to make this assembler part of the VM program
available as a command (maybe?).
"""
from enum import Enum, IntEnum, unique
import sys
# a-Z
def ch_is_alph (ch):
return (
(ch >= 'a' and ch <= 'z') or
(ch >= 'A' and ch <= 'Z')
)
# 0-9
def ch_is_dig (ch):
return ch >= '0' and ch <= '9'
# 0-9 || a-F
def ch_is_hex (ch):
return (
ch_is_dig(ch) or
(ch >= 'a' and ch <= 'f') or
(ch >= 'A' and ch <= 'F')
)
# 0-7
def ch_is_oct (ch):
return ch >= '0' and ch <= '7'
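# A minimal reference sketch of how the helpers above behave (the sample
# characters are chosen here purely for illustration and are not taken from
# the original file):
#   ch_is_alph('q') -> True     ch_is_dig('q') -> False
#   ch_is_hex('F')  -> True     ch_is_oct('8') -> False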
class Token:
@unique
class Types (Enum):
FILE_START = 0
NAME = 1
SIGN = 2
NUMBER = 3
STRING = 4
CHAR = 5
COMMENT = 6
LINE_BREAK = 7
FILE_END = 8
def __init__ (self, tk_row, tk_col, tk_type, tk_str):
# TODO: Add char idx and str length
self.row = tk_row
self.col = tk_col
self.type = tk_type
self.str = tk_str
def __str__ (self):
return (
f'<Token row: {self.row},\tcol: {self.col},\t'
f'type: {self.type},\tstr: {repr(self.str)}>'
)
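# A minimal sketch of the tokens the Tokenizer below yields for a source line
# such as "mov r0 42" (the instruction text is hypothetical, not taken from any
# real asm file); FILE_START, LINE_BREAK and FILE_END tokens are also emitted:
#   <Token row: 0,  col: 0,  type: Types.NAME,    str: 'mov'>
#   <Token row: 0,  col: 4,  type: Types.NAME,    str: 'r0'>
#   <Token row: 0,  col: 7,  type: Types.NUMBER,  str: '42'>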
class Tokenizer:
@unique
class States (Enum):
BLANK = 0
NAME = 1
NUMBER = 2
OCT_NUM = 3
HEX_NUM = 4
SIGN = 5
STRING = 6
STR_ESC = 7
CHAR = 8
COMMENT = 9
def _set_data (self, file_name, file_txt):
if file_name != None:
self.set_file_name(file_name)
elif file_txt != None:
self.set_file_txt(file_txt)
def set_file_name (self, file_name, load = True):
if not isinstance(file_name, str):
raise Exception(
'\n Tokenizer.set_file_name():\n'
' Argument should be a string'
)
self._file_name = file_name
self._file_txt = None
if load:
with open(file_name, 'r') as asm_file:
self.set_file_txt(asm_file.read())
def set_file_txt (self, file_txt):
if not isinstance(file_txt, str):
raise Exception(
'\n Tokenizer.set_file_txt():\n'
' Argument should be a string'
)
self._file_txt = file_txt
def __init__ (self, file_name = None, file_txt = None):
self._file_name = None
self._file_txt = None
self._set_data(file_name, file_txt)
def _tokenize_error (self, msg, row, col):
file_name = (
repr(self._file_name) if
self._file_name != None else
'<file>'
)
extr = self._file_txt.split('\n')[row]
extr = extr.replace('\t', ' ')
arrow = (' ' * col) + '^'
error_msg = (
f'\n Error in {file_name} @ '
f'(row: {row + 1}, col: {col + 1})\n'
f' {msg}\n'
f' {extr}\n'
f' {arrow}'
)
raise Exception(error_msg)
def tokenize (self, file_name = None, file_txt = None):
self._set_data(file_name, file_txt)
if self._file_txt == None:
raise Exception(
'\n Tokenizer.tokenize():\n'
' No file name or file text was provided.\n'
' Try using:\n'
' Tokenizer.set_file_name()\n'
' Tokenizer.set_file_txt()'
)
saved_ch = None
saved_state = None
saved_type = None
saved_int = None
on_sep = False
on_break = False
lead_zero = False
esc_char = False
tk_row = int()
tk_col = int()
tk_type = None
tk_str = str()
tk_tps = Token.Types
tkr_sts = Tokenizer.States
state = tkr_sts.BLANK
col = -1
# Start of file
yield Token(None, None, tk_tps.FILE_START, None)
for i, ch in enumerate(self._file_txt):
col += 1
if ch == '\n':
saved_state = tkr_sts.BLANK
saved_int = col
on_break = True
on_sep = True
if state != tkr_sts.STRING:
if ch in ' \t':
saved_state = tkr_sts.BLANK
on_sep = True
elif ch in '+-':
if state == tkr_sts.SIGN:
yield Token(
tk_row, saved_int, tk_type, tk_str)
saved_state = tkr_sts.SIGN
saved_type = tk_tps.SIGN
saved_int = col
on_sep = True
elif ch == '"':
saved_state = tkr_sts.STRING
saved_type = tk_tps.STRING
tk_col = col
on_sep = True
elif ch == "'":
saved_state = tkr_sts.CHAR
saved_type = tk_tps.CHAR
tk_col = col
on_sep = True
elif ch == ';':
saved_state = tkr_sts.COMMENT
saved_type = tk_tps.COMMENT
tk_col = col
on_sep = True
# First iteration or after space or tab
if state == tkr_sts.BLANK:
if on_sep:
on_sep = False
state = saved_state
tk_type = saved_type
tk_str = ch
if on_break:
on_break = False
yield Token(
tk_row, saved_int, tk_tps.LINE_BREAK, None)
tk_row += 1
col = -1
continue
tk_str = str()
if ch_is_alph(ch):
state = tkr_sts.NAME
tk_col = col
tk_type = tk_tps.NAME
tk_str = ch
continue
if ch_is_dig(ch):
state = tkr_sts.NUMBER
tk_col = col
tk_type = tk_tps.NUMBER
tk_str = ch
if ch == '0':
lead_zero = True
continue
self._tokenize_error('Invalid syntax', tk_row, col)
# Instruction names
# (Maybe also variable names
# and booleans as in 'true' & 'false')
elif state == tkr_sts.NAME:
if on_sep:
on_sep = False
yield Token(tk_row, tk_col, tk_type, tk_str)
state = saved_state
tk_type = saved_type
tk_str = ch
if on_break:
on_break = False
yield Token(
tk_row, saved_int, tk_tps.LINE_BREAK, None)
tk_row += 1
col = -1
continue
# Allow alphanumeric names as long
# as the first character is a-Z
if ch_is_alph(ch) or ch_is_dig(ch):
tk_str += ch
continue
self._tokenize_error('Invalid name syntax', tk_row, col)
# Numbers
# Only decimal, hex and octal integers
# At the moment, no float or E notation
elif state == tkr_sts.NUMBER:
if on_sep:
on_sep = False
# TODO: Disallow leading-zero decimal integers.
# Except for zero ("0"), of course
lead_zero = False
yield Token(tk_row, tk_col, tk_type, tk_str)
state = saved_state
tk_type = saved_type
tk_str = ch
if on_break:
on_break = False
yield Token(
tk_row, saved_int, tk_tps.LINE_BREAK, None)
tk_row += 1
col = -1
continue
if ch_is_dig(ch):
tk_str += ch
continue
if (col - tk_col == 1) and lead_zero:
if ch in ['o', 'O']:
state = tkr_sts.OCT_NUM
tk_str += ch
saved_ch = ch
continue
if ch in ['x', 'X']:
state = tkr_sts.HEX_NUM
tk_str += ch
saved_ch = ch
continue
self._tokenize_error(
'Invalid number syntax', tk_row, col)
# Octal number
elif state == tkr_sts.OCT_NUM:
if on_sep:
on_sep = False
if not ch_is_oct(saved_ch):
self._tokenize_error(
'Invalid octal syntax', tk_row, col)
lead_zero = False
yield Token(tk_row, tk_col, tk_type, tk_str)
state = saved_state
tk_type = saved_type
tk_str = ch
if on_break:
on_break = False
yield Token(
tk_row, saved_int, tk_tps.LINE_BREAK, None)
tk_row += 1
col = -1
continue
if ch_is_oct(ch):
tk_str += ch
saved_ch = ch
continue
self._tokenize_error('Invalid octal syntax', tk_row, col)
# Hexadecimal number
elif state == tkr_sts.HEX_NUM:
if on_sep:
on_sep = False
if not ch_is_hex(saved_ch):
self._tokenize_error(
'Invalid hexadecimal syntax',
tk_row, col
)
lead_zero = False
yield Token(tk_row, tk_col, tk_type, tk_str)
state = saved_state
tk_type = saved_type
tk_str = ch
if on_break:
on_break = False
yield Token(
tk_row, saved_int, tk_tps.LINE_BREAK, None)
tk_row += 1
col = -1
continue
if ch_is_hex(ch):
tk_str += ch
saved_ch = ch
continue
self._tokenize_error(
'Invalid hexadecimal syntax', tk_row, col)
# Sign (+-)
elif state == tkr_sts.SIGN:
# Here would go other single-char
# tokens like ()*$#
if saved_state != tkr_sts.SIGN:
yield Token(tk_row, saved_int, tk_type, tk_str)
state = saved_state
tk_type = saved_type
tk_str = ch
if on_break:
on_break = False
yield Token(
tk_row, saved_int, tk_tps.LINE_BREAK, None)
tk_row += 1
col = -1
continue
# String (starts with ")
# TODO: It has trouble scanning empty strings ("")
# correctly. Rectify that.
elif state == tkr_sts.STRING:
if on_break:
self._tokenize_error(
'Line break in string literal',
tk_row, col)
if ch == "\\":
esc_char = True
continue
if esc_char:
esc_char = False
if ch == '\\':
tk_str += '\\'
elif ch == '"':
tk_str += '"'
elif ch == "'":
tk_str += "'"
elif ch == 'n':
tk_str += '\n'
elif ch == 'r':
tk_str += '\r'
elif ch == 't':
tk_str += '\t'
elif ch == 'b':
tk_str += '\b'
elif ch == 'f':
tk_str += '\f'
# TODO: Allow for hex and octal rep char
else:
tk_str += '\\' + ch
continue
else:
if col - tk_col == 1:
tk_str = ch
elif ch != '"':
tk_str += ch
if ch == '"':
yield Token(tk_row, tk_col, tk_type, tk_str)
state = tkr_sts.BLANK
continue
# Character (starts with ')
elif state == tkr_sts.CHAR:
# TODO: Develop this part, duh'
pass
elif state == tkr_sts.COMMENT:
if on_break:
on_sep = False
on_break = False
yield Token(tk_row, tk_col, tk_type, tk_str)
yield Token(
tk_row, saved_int, tk_tps.LINE_BREAK, None)
tk_row += 1
col = -1
state = saved_state
tk_type = saved_type
continue
tk_str += ch
# End of file
yield Token(None, None, tk_tps.FILE_END, None)
class Parser:
def __init__ (self, file_name = None, file_txt = None):
self._file_name = None
self._file_txt = None
self.tokens = None
self._set_data(file_name, file_txt)
def _set_data (self, file_name, file_txt):
if file_name != None:
self.set_file_name(file_name)
elif file_txt != None:
self.set_file_txt(file_txt)
def set_file_name (self, file_name, load = True):
if not isinstance(file_name, str):
raise Exception(
'\n Parser.set_file_name():\n'
' Argument should be a string'
)
self._file_txt = None
self._file_name = file_name
if load:
with open(file_name, 'r') as asm_file:
self.set_file_txt(asm_file.read())
def set_file_txt (self, file_txt):
if not isinstance(file_txt, str):
raise Exception(
'\n Parser.set_file_txt():\n'
' Argument should be a string'
)
self._file_txt = file_txt
def parse (self, file_name = None, file_txt = None):
self._set_data(file_name, file_txt)
if self._file_txt == None:
raise Exception(
'\n Parser.parse():\n'
' No file name or file text was provided.\n'
' Try using:\n'
' Parser.set_file_name()\n'
' Parser.set_file_txt()'
)
# Tokenize all the lines and
# store them in a list
tokenizer = Tokenizer()
tokenizer.set_file_name(self._file_name, load = False)
tokenizer.set_file_txt(self._file_txt)
self.tokens = list()
for token in tokenizer.tokenize():
print(token)
self.tokens.append(token)
def main ():
args = sys.argv[1:]
file_name = str(args[0])
# Way No. 1
# with open(file_name, 'r') as asm_file:
# parser = Parser()
# parser.set_file_txt(asm_file.read())
# parser.parse()
# Way No. 2
# with open(file_name, 'r') as asm_file:
# parser = Parser(file_txt = asm_file.read())
# parser.parse()
# Way No. 3
# with open(file_name, 'r') as asm_file:
# parser = Parser()
# parser.parse(file_txt = asm_file.read())
# Way No. 4
# parser = Parser()
# parser.set_file_name(file_name)
# parser.parse()
# Way No. 5
# parser = Parser(file_name = file_name)
# parser.parse()
# Way No. 6
# parser = Parser()
# parser.parse(file_name = file_name)
# Way No. 7
parser = Parser(file_name)
parser.parse()
if __name__ == '__main__':
main() |
py | b40a55438ac8c0e7d0b38f5a5aeebbdda86e1b94 | """ `tldextract` accurately separates a URL's subdomain, domain, and public suffix,
using the Public Suffix List (PSL).
>>> import tldextract
>>> tldextract.extract('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')
>>> tldextract.extract('http://forums.bbc.co.uk/') # United Kingdom
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')
>>> tldextract.extract('http://www.worldbank.org.kg/') # Kyrgyzstan
ExtractResult(subdomain='www', domain='worldbank', suffix='org.kg')
`ExtractResult` is a namedtuple, so it's simple to access the parts you want.
>>> ext = tldextract.extract('http://forums.bbc.co.uk')
>>> (ext.subdomain, ext.domain, ext.suffix)
('forums', 'bbc', 'co.uk')
>>> # rejoin subdomain and domain
>>> '.'.join(ext[:2])
'forums.bbc'
>>> # a common alias
>>> ext.registered_domain
'bbc.co.uk'
By default, this package supports the public ICANN TLDs and their exceptions.
You can optionally support the Public Suffix List's private domains as well.
"""
from setuptools import setup
INSTALL_REQUIRES = ["idna", "requests>=2.1.0", "requests-file>=1.4", "filelock>=3.0.8"]
setup(
name="tldextract",
author="John Kurkowski",
author_email="[email protected]",
description=(
"Accurately separates a URL's subdomain, domain, and public suffix, "
"using the Public Suffix List (PSL). By "
"default, this includes the public ICANN TLDs and their "
"exceptions. You can optionally support the Public Suffix "
"List's private domains as well."
),
license="BSD License",
keywords="tld domain subdomain url parse extract urlparse urlsplit public suffix list publicsuffix publicsuffixlist",
url="https://github.com/john-kurkowski/tldextract",
packages=["tldextract"],
include_package_data=True,
python_requires=">=3.7",
long_description=__doc__,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
entry_points={
"console_scripts": [
"tldextract = tldextract.cli:main",
]
},
setup_requires=["setuptools_scm"],
use_scm_version={
"write_to": "tldextract/_version.py",
},
install_requires=INSTALL_REQUIRES,
)
|
py | b40a5612c274523d93f65b0ce571c48e7945c993 | from django.contrib.auth.models import Group
from django.urls import reverse
from django_webtest import WebTest
from tickets.models import FollowUp, Ticket
from tickets.tests.factories import TicketFactory, UserFactory
class TicketUpdateTestCase(WebTest):
""" """
def setUp(self):
self.user = UserFactory(
username="bsimpson", first_name="Bart", last_name="Simpson"
)
# self.user2 = UserFactory(is_staff=True)
self.user2 = UserFactory(
username="bgumble",
first_name="Barney",
last_name="Gumble",
password="Abcdef12",
is_staff=True,
)
self.user3 = UserFactory(
username="hsimpson", first_name="Homer", last_name="Simpson"
)
adminGrp, created = Group.objects.get_or_create(name="admin")
self.user2.groups.add(adminGrp)
self.status = "new"
self.ticket_type = "bug"
self.description = "There is something wrong."
self.priority = 3
self.ticket = TicketFactory(
submitted_by=self.user,
status=self.status,
ticket_type=self.ticket_type,
description=self.description,
priority=self.priority,
)
def test_update_not_logged_in(self):
"""if you're not logged in you shouldn't be able to edit a
ticket
"""
url = reverse("tickets:update_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url)
location = response["Location"]
new_url = "{0}?next={1}".format(reverse("login"), url)
self.assertRedirects(response, new_url)
self.assertIn(new_url, location)
def test_update_logged_not_owner(self):
"""if you're not the ticket's owner you shouldn't be able to edit a
ticket
"""
login = self.client.login(username=self.user3.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:update_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user3).follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
def test_update_logged_owner(self):
"""if you're the ticket's owner you should be able to edit the
ticket
"""
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:update_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_form.html")
self.assertContains(
response, f'<h1 class="my-1">Update Ticket # {self.ticket.id}:</h1>'
)
self.assertContains(response, f'<h3 class="my-2">{self.ticket.title}</h3>')
# verify that the form does not contain closed, split or duplicate
# these ticket status values are implemented else where.
# self.assertNotContains(response, 'close') "closed" in menu!
self.assertNotContains(response, "split")
self.assertNotContains(response, "duplicate")
form = response.forms["ticket"]
form["ticket_type"] = "feature"
form["description"] = "Nevermind it is OK."
form["priority"] = 4
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertContains(response, "New</span>")
self.assertContains(response, "Feature Request")
self.assertContains(response, "Nevermind it is OK.")
def test_update_logged_admin(self):
"""if you're an administator, you should be able to edit the
ticket even if you didn't create it.
"""
login = self.client.login(username=self.user2.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:update_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_form.html")
form = response.forms["ticket"]
form["ticket_type"] = "feature"
form["description"] = "Nevermind it is OK."
form["priority"] = 4
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertContains(response, "New</span>")
self.assertContains(response, "Feature Request")
self.assertContains(response, "Nevermind it is OK.")
def test_assignto_only_admin_staff(self):
"""For now, only administrators should be eligible to assign tickets
to.
"""
login = self.client.login(username=self.user2.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:assign_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
form = response.forms["comment"]
# user2 is the only user who belongs to the admin group in this
# test, so he is the only one who should appear as an option in
# the dropdown list; the other two should not.
# The assigned-to choices should include only Barney (the admin):
choices = form["assigned_to"].options
choices = [x[0] for x in choices]
assert str(self.user.id) not in choices
assert str(self.user2.id) in choices # the only admin.
assert str(self.user3.id) not in choices
def test_new_ticket(self):
"""The updateview is used to create tickets if no pk is provided. this
test verifies that it works as expected.
"""
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:new_ticket")
response = self.app.get(url, user=self.user)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_form.html")
self.assertContains(response, "New Ticket")
# verify that the form does not contain closed, split or duplicate
# these ticket status values are implemented elsewhere.
# self.assertNotContains(response, 'close') "closed" in menu!
self.assertNotContains(response, "split")
self.assertNotContains(response, "duplicate")
form = response.forms["ticket"]
form["ticket_type"] = "feature"
form["title"] = "New Ticket Title"
form["description"] = "New Ticket created by UpdateView"
form["priority"] = 4
form["application"] = 1
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertContains(response, "New</span>")
self.assertContains(response, "Feature Request")
self.assertContains(response, "New Ticket Title")
self.assertContains(response, "New Ticket created by UpdateView")
def test_add_tags_ticket_detail_form(self):
"""if you're the ticket's owner you should be able to edit the
ticket
"""
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:update_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_form.html")
# get the form and submit it
form = response.forms["ticket"]
form["tags"] = "blue, green, red, yellow"
response = form.submit("submit").follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
# verify that the tags submitted on the form are actually
# saved to the database and associated with this ticket.
tags_back = self.ticket.tags.all().order_by("name")
self.assertEqual(tags_back.count(), 4)
self.assertQuerysetEqual(
tags_back, ["blue", "green", "red", "yellow"], lambda a: str(a.name)
)
class SplitTicketTestCase(WebTest):
""" """
def setUp(self):
self.user = UserFactory(
username="bsimpson", first_name="Bart", last_name="Simpson"
)
self.user2 = UserFactory(
username="bgumble", first_name="Barney", last_name="Gumble", is_staff=True
)
adminGrp, created = Group.objects.get_or_create(name="admin")
self.user2.groups.add(adminGrp)
self.ticket = TicketFactory()
def test_split_not_logged_in(self):
"""if you're not logged in you shouldn't be able to split a
ticket
"""
url = reverse("tickets:split_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url)
location = response["Location"]
new_url = "{0}?next={1}".format(reverse("login"), url)
self.assertRedirects(response, new_url)
self.assertIn(new_url, location)
def test_split_logged_in_not_admin(self):
"""you have to be an administrator to split tickets ticket -
if you are not an administrator, you will be re-directed to
the tickets detail view.
"""
myuser = self.user
login = self.client.login(username=myuser.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:split_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=myuser).follow()
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertEqual(response.status_code, 200)
def test_split_logged_in_admin_does_not_exist(self):
"""if you try to split a ticket that does not exist, you will
be re-directed to the ticket list.
"""
myuser = self.user2
login = self.client.login(username=myuser.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:split_ticket", kwargs=({"pk": 999}))
response = self.app.get(url, user=myuser).follow()
self.assertTemplateUsed(response, "tickets/ticket_list.html")
self.assertEqual(response.status_code, 200)
def test_split_logged_admin(self):
"""if you're an administrator, you should be able to split a
ticket
"""
# verify that a comment was created on the original ticket and
# that the status of the original ticket has been updated
# accordingly
# verify that two new tickets where created
# TODO
# if a ticket is assigned to someone already, assigned to is a
# mandatory field
self.ticket.assigned_to = None
myuser = self.user2
login = self.client.login(username=myuser.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:split_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=myuser)
self.assertTemplateUsed(response, "tickets/split_ticket_form.html")
self.assertEqual(response.status_code, 200)
form = response.forms["splitticket"]
msg = "This ticket needs to be split"
msg1 = "This is part 1."
msg2 = "This is part 2."
form["comment"] = msg
form["description1"] = msg1
form["description2"] = msg2
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
# the comment from the splitting form should be in the response
self.assertContains(response, msg)
msg3 = "This ticket has been split into the following ticket(s):"
self.assertContains(response, msg3)
# verify that self.ticket 1 as two children and its status is split
ticket = Ticket.objects.get(id=self.ticket.id)
self.assertEqual(ticket.status, "split")
children = ticket.get_children()
self.assertQuerysetEqual(
children, [msg1, msg2], lambda a: a.__str__(), ordered=False
)
class CommentTicketTestCase(WebTest):
"""TicketCommentView is used to provide comments, but is also used to
accept, assign and re-assign tickets.
Anybody should be able to accept tickets; only admins should be
able to assign and re-assign tickets.
"""
def setUp(self):
self.user = UserFactory()
self.user2 = UserFactory(is_staff=True)
self.user3 = UserFactory(username="hsimpson", is_staff=True)
adminGrp, created = Group.objects.get_or_create(name="admin")
self.user2.groups.add(adminGrp)
self.ticket = TicketFactory(submitted_by=self.user)
def test_comment_non_existent_ticket(self):
"""if we try to comment on a ticket that does not exist, we
should be re-directed to the ticket list.
"""
myuser = self.user2
login = self.client.login(username=myuser.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:comment_ticket", kwargs=({"pk": 99}))
response = self.app.get(url, user=myuser).follow()
self.assertTemplateUsed(response, "tickets/ticket_list.html")
self.assertEqual(response.status_code, 200)
def test_comment_not_logged_in(self):
"""if you're not logged in you shouldn't be able to comment on
a ticket
"""
url = reverse("tickets:comment_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url)
location = response["Location"]
new_url = "{0}?next={1}".format(reverse("login"), url)
self.assertRedirects(response, new_url)
self.assertIn(new_url, location)
def test_comment_logged_in_not_admin(self):
"""you don't have to be an admin to comment on a ticket - just
logged in
"""
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:comment_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
form = response.forms["comment"]
form["comment"] = "What a great idea"
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertContains(response, "What a great idea")
def test_private_comment_logged_in_not_admin_or_creator(self):
"""you can't leave a private comment if you are not an admin
or the ticket creator
"""
myuser = self.user3
login = self.client.login(username=myuser, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:comment_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=myuser)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
form = response.forms["comment"]
# private should not be on of the available fields.
self.assertNotIn("private", form.fields.keys())
def test_private_comment_logged_in_admin(self):
"""you can leave a private comment if you are an admin"""
myuser = self.user2
login = self.client.login(username=myuser, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:comment_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=myuser)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
form = response.forms["comment"]
form["comment"] = "What a great idea"
form["private"] = True
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertContains(response, "What a great idea")
self.assertContains(response, "private")
comment = FollowUp.all_comments.filter(ticket=self.ticket)
self.assertEqual(comment.count(), 1)
self.assertTrue(comment[0].private)
def test_private_comment_logged_in_creator(self):
"""you can leave a private comment if you are the ticket
creator
"""
myuser = self.user
login = self.client.login(username=myuser, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:comment_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=myuser)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
form = response.forms["comment"]
form["comment"] = "What a great idea"
form["private"] = True
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertContains(response, "What a great idea")
self.assertContains(response, "private")
comment = FollowUp.all_comments.filter(ticket=self.ticket)
self.assertEqual(comment.count(), 1)
self.assertTrue(comment[0].private)
def test_comment_bad_data_logged_in(self):
"""you comment is a manditory field. An error will be thown
if you don't provide one.
"""
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:comment_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
form = response.forms["comment"]
response = form.submit()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
errmsg = "This field is required."
self.assertContains(response, errmsg)
def test_accept_ticket_unlogged_user(self):
"""
A user who is not logged in should not be able to accept a
ticket. If they try, they should be re-directed to the ticket
detail page and the ticket should remain unassigned.
Arguments:
- `self`:
"""
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
url = reverse("tickets:accept_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user).follow()
self.assertEqual(response.status_code, 200)
url = reverse("tickets:ticket_detail", kwargs=({"pk": self.ticket.id}))
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
def test_assign_ticket_unlogged_user(self):
"""
A user who is not logged in should not be able to assign a
ticket. If they try, they should be re-directed to the ticket
detail page and the ticket should remain unassigned.
Arguments:
- `self`:
"""
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
url = reverse("tickets:assign_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user).follow()
self.assertEqual(response.status_code, 200)
url = reverse("tickets:ticket_detail", kwargs=({"pk": self.ticket.id}))
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
def test_reassign_ticket_unlogged_user(self):
"""
A user who is not logged in should not be able to re-assign a
ticket. If they try, they should be re-directed to the ticket
detail page and the ticket should remain assigned to the
original user.
Arguments:
- `self`:
"""
self.ticket.assigned_to = self.user3
self.ticket.status = "assigned"
self.ticket.save()
assert self.ticket.assigned_to == self.user3
assert self.ticket.status == "assigned"
url = reverse("tickets:assign_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user).follow()
self.assertEqual(response.status_code, 200)
url = reverse("tickets:ticket_detail", kwargs=({"pk": self.ticket.id}))
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
# nothing has changed
assert self.ticket.assigned_to == self.user3
assert self.ticket.status == "assigned"
def test_accept_ticket_user(self):
"""
If someone who is not an admin tries to accept a ticket,
they should be re-directed to the ticket detail page and the
ticket should remain unassigned with its status unchanged.
Arguments:
- `self`:
"""
# verify that our ticket is not assigned to anyone yet
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:accept_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user).follow()
self.assertEqual(response.status_code, 200)
url = reverse("tickets:ticket_detail", kwargs=({"pk": self.ticket.id}))
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
def test_accept_ticket_admin(self):
"""
An admin user should be able to accept a ticket. Once accepted,
the status of the ticket will be 'accepted' but it will not be
assigned to anyone.
Arguments:
- `self`:
"""
# verify that our ticket is not assigned to anyone yet
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:accept_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
msg = "Accept Ticket #{}"
self.assertContains(response, msg.format(self.ticket.id))
form = response.forms["comment"]
form["comment"] = "I can do it."
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
# verify that our ticket is now accepted, but has not been assigned to
# anyone yet
ticket = Ticket.objects.get(id=self.ticket.id)
assert ticket.assigned_to is None
assert ticket.status == "accepted"
def test_assign_ticket_user(self):
"""
A user who is not an administrator should not be able to assign
a ticket. If they try, they should be re-directed to the
ticket detail page, and the ticket should remain unassigned
Arguments:
- `self`:
"""
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:assign_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user).follow()
self.assertEqual(response.status_code, 200)
url = reverse("tickets:ticket_detail", kwargs=({"pk": self.ticket.id}))
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
def test_assign_ticket_admin(self):
"""
An administrator should be able to assign a ticket to another user.
Arguments:
- `self`:
"""
# verify that our ticket is not assigned to anyone yet
assert self.ticket.assigned_to is None
assert self.ticket.status == "new"
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:assign_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
msg = "Assign Ticket #{}"
self.assertContains(response, msg.format(self.ticket.id))
form = response.forms["comment"]
form["comment"] = "I have just the person."
form["assigned_to"] = self.user3.id
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
# our user should now be assigned to this ticket.
ticket = Ticket.objects.get(id=self.ticket.id)
assert ticket.assigned_to == self.user3
assert ticket.status == "assigned"
def test_reassign_ticket_user(self):
"""
A user who is not an administrator should not be able to re-assign
a ticket. If they try, they should be re-directed to the
ticket detail page, and the ticket should remain assigned to
the original user.
Arguments:
- `self`:
"""
# assign our ticket to a user and change its status
self.ticket.assigned_to = self.user3
self.ticket.status = "assigned"
self.ticket.save()
# verify that our changes worked
assert self.ticket.assigned_to == self.user3
assert self.ticket.status == "assigned"
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:assign_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user).follow()
self.assertEqual(response.status_code, 200)
url = reverse("tickets:ticket_detail", kwargs=({"pk": self.ticket.id}))
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
# nothing has changed
assert self.ticket.assigned_to == self.user3
assert self.ticket.status == "assigned"
def test_reassign_ticket_admin(self):
"""
An administrator should be able to reassign a ticket to another user.
Arguments:
- `self`:
"""
# assign our ticket to a user and change its status
self.ticket.assigned_to = self.user2
self.ticket.status = "assigned"
self.ticket.save()
login = self.client.login(username=self.user.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:assign_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/comment_form.html")
msg = "Assign Ticket #{}"
self.assertContains(response, msg.format(self.ticket.id))
form = response.forms["comment"]
form["comment"] = "I have just the person."
form["assigned_to"] = self.user3.id
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
# our user should now be assigned to this ticket.
ticket = Ticket.objects.get(id=self.ticket.id)
assert ticket.assigned_to == self.user3
assert ticket.status == "assigned"
class CloseTicketTestCase(WebTest):
""" """
def setUp(self):
self.user = UserFactory()
self.user2 = UserFactory(is_staff=True)
self.user3 = UserFactory(username="hsimpson")
adminGrp, created = Group.objects.get_or_create(name="admin")
self.user2.groups.add(adminGrp)
self.ticket = TicketFactory()
self.ticket2 = TicketFactory(description="This is a duplicate")
def test_close_ticket_admin(self):
"""if you're an administator, you should be able to close a
ticket
"""
login = self.client.login(username=self.user2.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:close_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
form = response.forms["comment"]
form["comment"] = "This feature has been implemented"
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertContains(response, "This feature has been implemented")
ticket = Ticket.objects.get(id=self.ticket.id)
self.assertEqual(ticket.status, "closed")
def test_close_ticket_non_admin(self):
"""if you're an not administator, you should NOT be able to close a
ticket. Instead, you will be re-directed to the ticket list.
"""
myuser = self.user
login = self.client.login(username=myuser.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:close_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=myuser).follow()
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertEqual(response.status_code, 200)
def test_reopen_ticket_admin(self):
"""if you're an administator, you should be able to reopen a
ticket
"""
# make sure that the ticket is closed before we do anything
self.ticket = Ticket.objects.get(id=self.ticket.id)
self.ticket.status = "closed"
self.ticket.save()
self.assertEqual(self.ticket.status, "closed")
login = self.client.login(username=self.user2.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:reopen_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
form = response.forms["comment"]
msg = "This ticket needs to be reopened"
form["comment"] = msg
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertContains(response, msg)
ticket = Ticket.objects.get(id=self.ticket.id)
self.assertEqual(ticket.status, "reopened")
def test_reopen_ticket_non_admin(self):
"""if you're an not administator, you should NOT be able to reopen a
ticket. You will be re-directed to its detail page.
"""
# make sure that the ticket is closed before we do anything
self.ticket = Ticket.objects.get(id=self.ticket.id)
self.ticket.status = "closed"
self.ticket.save()
self.assertEqual(self.ticket.status, "closed")
myuser = self.user
login = self.client.login(username=myuser.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:reopen_ticket", kwargs=({"pk": self.ticket.id}))
response = self.app.get(url, user=myuser).follow()
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
self.assertEqual(response.status_code, 200)
# make sure that the ticket is still closed (fetch a fresh copy;
# do not overwrite the status before asserting)
self.ticket = Ticket.objects.get(id=self.ticket.id)
self.assertEqual(self.ticket.status, "closed")
def test_close_ticket_as_duplicate_admin(self):
"""if you're an administator, you should be able to close a
ticket as a duplicate
"""
# verify that a comment was created and that the status of the
# original ticket has been updated accordingly
login = self.client.login(username=self.user2.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:close_ticket", kwargs=({"pk": self.ticket2.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
form = response.forms["comment"]
msg = "This ticket is a duplicate of an earlier ticket"
form["comment"] = msg
form["duplicate"].checked = True
form["same_as_ticket"] = self.ticket.id
response = form.submit().follow()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/ticket_detail.html")
# verify that the message appears in the response:
self.assertContains(response, msg)
self.assertContains(response, "This ticket duplicates ticket(s):")
# check that the status of ticket 2 has been updated
ticket = Ticket.objects.get(id=self.ticket2.id)
self.assertEqual(ticket.status, "duplicate")
# get the original ticket for ticket 2 and verify that it is ticket 1
original = ticket.get_originals()
self.assertEqual(self.ticket, original[0].original)
def test_close_ticket_as_duplicate_to_self(self):
"""If the ticket number entered in same_as_ticket is the same
as the current ticket, the form should throw an error
"""
login = self.client.login(username=self.user2.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:close_ticket", kwargs=({"pk": self.ticket2.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
form = response.forms["comment"]
msg = "This ticket is a duplicate of an earlier ticket"
form["comment"] = msg
form["duplicate"].checked = True
form["same_as_ticket"] = self.ticket2.id
response = form.submit()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
errmsg = "Invalid ticket number. A ticket cannot duplicate itself."
self.assertContains(response, msg)
self.assertContains(response, errmsg)
ticket = Ticket.objects.get(id=self.ticket2.id)
self.assertEqual(ticket.status, "new")
def test_close_ticket_as_duplicate_missing_ticket(self):
"""If you forget to provide a duplicate ticket, the form
should throw an error
"""
login = self.client.login(username=self.user2.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:close_ticket", kwargs=({"pk": self.ticket2.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
form = response.forms["comment"]
msg = "This ticket is a duplicate of an earlier ticket"
form["comment"] = msg
form["duplicate"].checked = True
response = form.submit()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
errmsg = "Duplicate is true but no ticket number is provided."
self.assertContains(response, msg)
self.assertContains(response, errmsg)
ticket = Ticket.objects.get(id=self.ticket2.id)
self.assertEqual(ticket.status, "new")
def test_close_ticket_as_duplicate_missing_check(self):
"""If you forget to check the duplicate box but provide a
number, the form should throw an error
"""
# verify that a comment was created and that the status of the
# original ticket has been updated accordingly
login = self.client.login(username=self.user2.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:close_ticket", kwargs=({"pk": self.ticket2.id}))
response = self.app.get(url, user=self.user2)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
form = response.forms["comment"]
msg = "This ticket is a duplicate of an earlier ticket"
form["comment"] = msg
form["same_as_ticket"] = 1
response = form.submit()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "tickets/close_reopen_ticket_form.html")
errmsg = "Duplicate is false and a ticket number was provided."
self.assertContains(response, msg)
self.assertContains(response, errmsg)
# verify that the status of ticket2 has not been changed.
ticket = Ticket.objects.get(id=self.ticket2.id)
self.assertEqual(ticket.status, "new")
def test_close_non_existent_ticket(self):
"""if you try to comment on an non-existent ticket, you will
be re-directed to ticket list.
"""
myuser = self.user2
login = self.client.login(username=myuser.username, password="Abcdef12")
self.assertTrue(login)
url = reverse("tickets:close_ticket", kwargs=({"pk": 999}))
response = self.app.get(url, user=myuser).follow()
self.assertTemplateUsed(response, "tickets/ticket_list.html")
self.assertEqual(response.status_code, 200)
|
py | b40a56162f26e8e9046eb5d2780fbc2187e10a87 | from viadot import __version__
def test_version():
assert __version__ == "0.2.2"
|
py | b40a56b095fff757c4b13658c842d78ae99a4833 | # (c) 2022 Warren Usui MOPFPPP
# This code is licensed under the MIT license (see LICENSE.txt for details)
"""
Generate a tier
"""
from utilities import flatten
def tree_gen_next_tier(tree):
"""
Generate the next tier on the tree
    @param {tuple} tree Existing tree
    @return {tuple} New nodes to be added as the next tier
"""
return _new_next_tier_nodes(_make_next_tier(_get_new_leaf_info(
_find_new_nodes(tree))))
def _get_new_leaf_info(tree):
return (tuple(map(_get_offspring_points, tree)),
tuple(map(_get_offspring_index, tree)))
def _get_offspring_points(node):
return node["offspring"]["points"]
def _get_offspring_index(node):
return node["offspring"]["index"]
def _new_next_tier_nodes(info):
return tuple(map(_next_tier_wrapped, info))
def _next_tier_wrapped(tier):
return {"parent": tier[1], "point": tier[0]}
def _make_next_tier(lev_info):
return zip(flatten(lev_info[0]), flatten(_get_flat_parent_links(
tuple(_get_parent_link_values(lev_info)))))
def _get_parent_link_values(next_level):
return zip(_extract_length(next_level[0]), next_level[1])
def _extract_length(next_level):
return tuple(map(len, next_level))
def _get_flat_parent_links(link_info):
return tuple([_flat_parent_links(xval) for xval in link_info])
def _flat_parent_links(xval):
return tuple([xval[1] for _ in range(0, xval[0])])
def _find_new_nodes(tree):
return tuple(filter(_find_new_nodes_wrapped, tree))
def _find_new_nodes_wrapped(node):
return "offspring" in node
|
py | b40a573372f25b5ed07df8590a24c1000e654afd | from __future__ import annotations
from abc import ABC
from multiprocessing import cpu_count
from typing import Any, Dict, Generator, List, Optional, Tuple, Type, Union
import numpy as np
from numpy import ndarray
from pandas import DataFrame, Series
from sklearn.model_selection import KFold, StratifiedKFold
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map
from error_consistency.containers import ConsistencyResults, KFoldResults
from error_consistency.functional import UnionHandling, error_consistencies, get_y_error
from error_consistency.model import Model, ModelFactory
from error_consistency.parallel import (
get_test_predictions,
get_test_predictions_internal,
validate_kfold_imap,
)
from error_consistency.random import parallel_seed_generators, random_seeds
from error_consistency.utils import array_indexer, to_numpy
PandasData = Union[DataFrame, Series]
Shape = Tuple[int, ...]
def validate_fold(
x: ndarray,
y: ndarray,
x_sample_dim: int,
y_sample_dim: int,
train_idx: ndarray,
val_idx: ndarray,
model: Model,
save_fold_accs: bool,
) -> KFoldResults:
"""Perform the internal k-fold validation step on one fold, only doing what is necessary.
Parameters
----------
train_idx: ndarray
Fold / validation training indices (see notes above).
val_idx: ndarray
        Fold / validation testing indices (i.e. NOT the final holdout testing indices / set).
    save_fold_accs: bool
        Whether or not to compute and save the accuracy on the validation fold.
Returns
-------
kfold_results: KFoldResults
Scroll up in the source code.
:meta private:
"""
# regardless of options, we need to fit the training set
if y.ndim == 2:
raise NotImplementedError("Need to collapse one-hots still")
x_train = array_indexer(x, x_sample_dim, train_idx)
y_train = array_indexer(y, y_sample_dim, train_idx)
model.fit(x_train, y_train)
acc = None
y_pred = None
if save_fold_accs:
x_val = array_indexer(x, x_sample_dim, val_idx)
y_val = array_indexer(y, y_sample_dim, val_idx)
y_pred = model.predict(x_val)
acc = 1 - np.mean(get_y_error(y_pred, y_val, y_sample_dim))
return KFoldResults(model, acc, y_pred, val_idx)
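# A minimal usage sketch of the helper above (illustrative only; `model_factory`, `x`, `y`
# and the KFold settings here are assumptions, not part of this module): each fold gets a
# fresh Model instance from a ModelFactory, and both sample dimensions are taken as 0.
#
#     kf = KFold(n_splits=5, shuffle=True, random_state=0)
#     fold_results = [
#         validate_fold(x, y, 0, 0, train_idx, val_idx, model_factory.create(), True)
#         for train_idx, val_idx in kf.split(x, y)
#     ]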
class ErrorConsistencyBase(ABC):
"""Base class for functionality that all error consistency calculatings must perform.
Parameters
----------
model: Intersection[Callable, Type]
A *class* where instances are classifiers that implement:
1. A ``.fit`` or ``.train`` method that:
#. accepts predictors and targets, plus `fit_args`, and
#. updates the state of `model` when calling `.fit` or `.train`
2. A ``.predict`` or ``.test`` method, that:
#. accepts testing samples, plus ``predict_args``, and
#. requires having called ``.fit`` previously, and
#. returns *only* the predictions as a single ArrayLike (e.g. NumPy array, List, pandas
DataFrame or Series)
E.g.::
import numpy as np
from error_consistency import ErrorConsistency
            from sklearn.neighbors import KNeighborsClassifier as KNN
knn_args = dict(n_neighbors=5, n_jobs=1)
errcon = ErrorConsistency(model=KNN, model_args=knn_args)
# KNN is appropriate here because we could write e.g.
x = np.random.uniform(0, 1, size=[100, 5])
y = np.random.randint(0, 3, size=[100])
x_test = np.random.uniform(0, 1, size=[20, 5])
y_test = np.random.randint(0, 3, size=[20])
KNN.fit(x, y) # updates the state, no need to use a returned value
y_pred = KNN.predict(x_test) # returns a single object
x: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing predictor samples. Must be in a format that is consumable with
`model.fit(x, y, **model_args)` for arguments `model` and `model_args`. By default,
splitting of x into cross-validation subsets will be along the first axis (axis 0), that is,
the first axis is assumed to be the sample dimension. If your fit method requires a
different sample dimension, you can specify this in `x_sample_dim`.
y: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing targets. Must be in a format that is consumable with
`model.fit(x, y, **model_args)` for arguments `model` and `model_args`. By default,
splitting of y into cross-validation subsets will be along the first axis (axis 0), that is,
the first axis is assumed to be the sample dimension. If your fit method requires a
different sample dimension (e.g. y is a one-hot encoded array), you can specify this
in `y_sample_dim`.
n_splits: int = 5
How many folds to use for validating error consistency.
model_args: Optional[Dict[str, Any]]
Any arguments that are required each time to construct a fresh instance of the model (see
above). Note that the data x and y must NOT be included here.
fit_args: Optional[Dict[str, Any]]
Any arguments that are required each time when calling the `.fit` or `.train` methods
internally (see notes for `model` above). Note that the data x and y must NOT be included
here.
fit_args_x_y: Optional[Tuple[str, str]] = None
Name of the arguments which data `x` and target `y` are passed to. This is needed because
different libraries may have different conventions for how they expect predictors and
targets to be passed in to `fit` or `train`.
If None (default), it will be assumed that the `.fit` or `.train` method of the instance of
`model` takes x as its first positional argument, and `y` as its second, as in e.g.
`model.fit(x, y, **model_args)`.
If a tuple of strings (x_name, y_name), then a dict will be constructed internally by
splatting, e.g.
args_dict = {**{x_name: x_train, y_name: y_train}, **model_args}
model.fit(**args_dict)
predict_args: Optional[Dict[str, Any]]
Any arguments that are required each time when calling the `.predict` or `.test` methods
internally (see notes for `model` above). Note that the data x must NOT be included here.
predict_args_x: Optional[str] = None
Name of the argument which data `x` is passed to during evaluation. This is needed because
different libraries may have different conventions for how they expect predictors and
targets to be passed in to `predict` or `test` calls.
If None (default), it will be assumed that the `.predict` or `.test` method of the instance
of `model` takes x as its first positional argument, as in e.g.
`model.predict(x, **predict_args)`.
If `predict_args_x` is a string, then a dict will be constructed internally with this
string, e.g.
args_dict = {**{predict_args_x: x_train}, **model_args}
model.predict(**args_dict)
stratify: bool = False
If True, use sklearn.model_selection.StratifiedKFold during internal k-fold. Otherwise, use
sklearn.model_selection.KFold.
x_sample_dim: int = 0
The axis or dimension along which samples are indexed. Needed for splitting x into
partitions for k-fold.
y_sample_dim: int = 0
The axis or dimension along which samples are indexed. Needed for splitting y into
partitions for k-fold only if the target is e.g. one-hot encoded or dummy-coded.
onehot_y: bool = True
Only relevant for two-dimensional `y`. Set to True if `y` is a one-hot array with samples
indexed by `y_sample_dim`. Set to False if `y` is dummy-coded.
Notes
-----
Conceptually, for each repetition, there are two steps to computing a k-fold error consistency
with holdout set:
(1) evaluation on standard k-fold ("validation" or "folding")
(2) evaluation on holdout set (outside of k-fold) ("testing")
There are a lot of overlapping terms and concepts here, so with analogy to deep learning, we
shall refer to step (1) as *validation* or *val* and step (2) as *testing* or *test*. This will
help keep variable names and function arguments sane and clear. We refer to the *entire* process
    of validation + testing as *evaluation*. Thus the .evaluate() method will have both validation
and testing steps, in this terminology.
Since validation is very much just standard k-fold, we also thus refer to validation steps as
*fold* steps. So for example validation or fold scores are the k accuracies on the non-training
partitions of each k-fold repetition (k*repetitions total), but test scores are the
    `repetitions` accuracies on the heldout test set.
The good thing is that standard k-fold is standard k-fold no matter how we implement
error-consistency (e.g. with holdout, Monte-Carlo style subsetting, etc). We just have train and
(fold) test indices, and do the usual fit calls and etc. So this can be abstracted to the base
error consistency class.
:meta private:
"""
def __init__(
self,
model: Any,
x: ndarray,
y: ndarray,
n_splits: int = 5,
model_args: Optional[Dict[str, Any]] = None,
fit_args: Optional[Dict[str, Any]] = None,
fit_args_x_y: Optional[Tuple[str, str]] = None,
predict_args: Optional[Dict[str, Any]] = None,
predict_args_x: Optional[str] = None,
stratify: bool = False,
x_sample_dim: int = 0,
y_sample_dim: int = 0,
empty_unions: UnionHandling = 0,
) -> None:
self.model: Model
self.stratify: bool
self.n_splits: int
self.x: ndarray
self.y: ndarray
self.x_sample_dim: int
self.y_sample_dim: int
# if user is using DataFrames, save reference to these for the variable names
self.x_df: Optional[PandasData]
self.y_df: Optional[PandasData]
self.model_factory = ModelFactory(
model,
model_args,
fit_args,
fit_args_x_y,
predict_args,
predict_args_x,
x_sample_dim,
y_sample_dim,
)
self.stratify = stratify
if n_splits < 2:
raise ValueError("Must have more than one split for K-fold.")
self.n_splits = n_splits
self.empty_unions = empty_unions
self.x, self.y, self.x_df, self.y_df = self.save_x_y(x, y)
dim_info = self.save_dims(x, y, x_sample_dim, y_sample_dim)
self.x_transpose_shape, self.y_transpose_shape = dim_info[:2]
self.x_sample_dim, self.y_sample_dim = dim_info[2:]
@staticmethod
def save_x_y(x: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, PandasData, PandasData]:
""" :meta private: """
x_df = x if isinstance(x, DataFrame) or isinstance(x, Series) else None
y_df = y if isinstance(y, DataFrame) or isinstance(y, Series) else None
x = to_numpy(x)
y = to_numpy(y)
if y.ndim > 2:
raise ValueError("Target `y` can only be 1-dimensional or two-dimensional.")
if y.ndim == 2:
uniques = np.unique(y.ravel()).astype(int)
if not np.array_equal(uniques, [0, 1]):
raise ValueError("Only dummy-coded and one-hot coded 2D targets are supported.")
return x, y, x_df, y_df
@staticmethod
def save_dims(
x: ndarray, y: ndarray, x_sample_dim: int, y_sample_dim: int
) -> Tuple[Shape, Shape, int, int]:
""" :meta private: """
# we need to convert the sample dimensions to positive values to construct transpose shapes
if y_sample_dim > 1 or y_sample_dim < -1:
raise ValueError(
"Invalid `y_sample_dim`. Must be 0 for one-dimensional `y`, "
"and either 1 or -1 for two-dimensional `y`."
)
y_dim = int(np.abs(y_sample_dim)) # 1, -1 have same behaviour if dim=2, abs(0) is 0
if (x_sample_dim > x.ndim - 1) or (x_sample_dim < -x.ndim):
raise ValueError(
"Invalid `x_sample_dim`. `x_sample_dim` must satisfy "
"`x.ndim - 1 < x_sample_dim < -x.ndim`"
)
x_dim = x.ndim - x_sample_dim if x_sample_dim < 0 else x_sample_dim
xdim_indices = list(range(int(x.ndim)))
x_transpose_shape = (x_dim, *xdim_indices[:x_dim], *xdim_indices[x_dim + 1 :])
y_transpose_shape = y.shape if y.ndim != 2 else ((0, 1) if y_dim == 1 else (1, 0))
return x_transpose_shape, y_transpose_shape, x_dim, y_dim
def validate_fold(
self, train_idx: ndarray, val_idx: ndarray, save_fold_accs: bool
) -> KFoldResults:
"""Perform the internal k-fold validation step on one fold, only doing what is necessary.
Parameters
----------
train_idx: ndarray
Fold / validation training indices (see notes above).
val_idx: ndarray
            Fold / validation testing indices (i.e. NOT the final holdout testing indices / set).
        save_fold_accs: bool
            Whether or not to compute and save the accuracy on the validation fold.
Returns
-------
kfold_results: KFoldResults
Scroll up in the source code.
:meta private:
"""
# regardless of options, we need to fit the training set
if self.y.ndim == 2:
raise NotImplementedError("Need to collapse one-hots still")
x_train = array_indexer(self.x, self.x_sample_dim, train_idx)
y_train = array_indexer(self.y, self.y_sample_dim, train_idx)
model = self.model_factory.create()
model.fit(x_train, y_train)
acc = None
y_pred = None
if save_fold_accs:
x_val = array_indexer(self.x, self.x_sample_dim, val_idx)
y_val = array_indexer(self.y, self.y_sample_dim, val_idx)
y_pred = model.predict(x_val)
acc = 1 - np.mean(get_y_error(y_pred, y_val, self.y_sample_dim))
return KFoldResults(model, acc, y_pred, val_idx)
def starmap_args(
self, repetitions: int, save_fold_accs: bool, seed: Optional[int]
) -> Generator[Tuple[Any, ...], None, None]:
rngs = parallel_seed_generators(seed, repetitions)
model = self.model_factory.create
for i in range(repetitions):
yield (
self.x,
self.y,
self.x_sample_dim,
self.y_sample_dim,
self.n_splits,
self.stratify,
[model() for _ in range(self.n_splits)],
rngs[i],
save_fold_accs,
)
class ErrorConsistencyKFoldHoldout(ErrorConsistencyBase):
"""Compute error consistencies for a classifier.
Parameters
----------
model: Intersection[Callable, Type]
A *class* where instances are classifiers that implement:
1. A ``.fit`` or ``.train`` method that:
* accepts predictors and targets, plus `fit_args`, and
* updates the state of `model` when calling `.fit` or `.train`
2. A ``.predict`` or ``.test`` method, that:
* accepts testing samples, plus ``predict_args``, and
* requires having called ``.fit`` previously, and
* returns *only* the predictions as a single ArrayLike (e.g. NumPy array, List, pandas
DataFrame or Series)
.. _valid model example:
E.g.::
import numpy as np
from error_consistency import ErrorConsistency
        from sklearn.neighbors import KNeighborsClassifier as KNN
knn_args = dict(n_neighbors=5, n_jobs=1)
errcon = ErrorConsistency(model=KNN, model_args=knn_args)
# KNN is appropriate here because we could write e.g.
x = np.random.uniform(0, 1, size=[100, 5])
y = np.random.randint(0, 3, size=[100])
x_test = np.random.uniform(0, 1, size=[20, 5])
y_test = np.random.randint(0, 3, size=[20])
KNN.fit(x, y) # updates the state, no need to use a returned value
y_pred = KNN.predict(x_test) # returns a single object
x: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing predictor samples. Must be in a format that is consumable with
`model.fit(x, y, **model_args)` for arguments `model` and `model_args`. By default,
splitting of x into cross-validation subsets will be along the first axis (axis 0), that is,
the first axis is assumed to be the sample dimension. If your fit method requires a
different sample dimension, you can specify this in `x_sample_dim`.
y: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing targets. Must be in a format that is consumable with
`model.fit(x, y, **model_args)` for arguments `model` and `model_args`. By default,
splitting of y into cross-validation subsets will be along the first axis (axis 0), that is,
the first axis is assumed to be the sample dimension. If your fit method requires a
different sample dimension (e.g. y is a one-hot encoded array), you can specify this
in `y_sample_dim`.
n_splits: int = 5
How many folds to use, and thus models to generate, per repetition.
model_args: Optional[Dict[str, Any]]
Any arguments that are required each time to construct a fresh instance of the model (see
the `valid model example`_ above). Note that the data x and y must NOT be included here.
fit_args: Optional[Dict[str, Any]]
Any arguments that are required each time when calling the `.fit` or `.train` methods
internally (see the `valid model example`_ above). Note that the data x and y must NOT be
included here.
fit_args_x_y: Optional[Tuple[str, str]] = None
Name of the arguments which data `x` and target `y` are passed to. This is needed because
different libraries may have different conventions for how they expect predictors and
targets to be passed in to `fit` or `train`. For example, a function may have the
signature::
f(predictor: ndarray, target: ndarray) -> Any
To allow our internal `x_train` and `x_test` splits to be passed to the right arguments,
we thus need to know these names.
If None (default), it will be assumed that the `.fit` or `.train` method of the instance of
`model` takes x as its first positional argument, and `y` as its second, as in e.g.
`model.fit(x, y, **model_args)`.
If a tuple of strings (x_name, y_name), then a dict will be constructed internally by
splatting, e.g.::
args_dict = {**{x_name: x_train, y_name: y_train}, **model_args}
model.fit(**args_dict)
Alternately, see the documentation for `error_consistency.model.Model` for how to subclass
your own function here if you require more fine-grained control of how arguments are passed
into the fit and predict calls.
predict_args: Optional[Dict[str, Any]]
Any arguments that are required each time when calling the `.predict` or `.test` methods
internally (see the `valid model example`_ above). Note that the data x must NOT be included
here.
predict_args_x: Optional[str] = None
Name of the argument which data `x` is passed to during evaluation. This is needed because
different libraries may have different conventions for how they expect predictors and
targets to be passed in to `predict` or `test` calls.
If None (default), it will be assumed that the `.predict` or `.test` method of the instance
of `model` takes x as its first positional argument, as in e.g.
`model.predict(x, **predict_args)`.
If `predict_args_x` is a string, then a dict will be constructed internally with this
string, e.g.::
args_dict = {**{predict_args_x: x_train}, **model_args}
model.predict(**args_dict)
stratify: bool = False
If True, use sklearn.model_selection.StratifiedKFold during internal k-fold. Otherwise, use
sklearn.model_selection.KFold.
x_sample_dim: int = 0
The axis or dimension along which samples are indexed. Needed for splitting x into
partitions for k-fold.
y_sample_dim: int = 0
The axis or dimension along which samples are indexed. Needed for splitting y into
partitions for k-fold only if the target is e.g. one-hot encoded or dummy-coded.
empty_unions: UnionHandling = 0
When computing the pairwise consistency or leave-one-out consistency on small or
simple datasets, it can be the case that the union of the error sets is empty (e.g. if no
prediction errors are made). In this case the intersection over union is 0/0, which is
undefined.
* If `0` (default), the consistency for that collection of error sets is set to zero.
* If `1`, the consistency for that collection of error sets is set to one.
* If "nan", the consistency for that collection of error sets is set to `np.nan`.
* If "drop", the `consistencies` array will not include results for that collection,
but the consistency matrix will include `np.nans`.
* If "error", an empty union will cause a `ZeroDivisionError`.
* If "warn", an empty union will print a warning (probably a lot).
Notes
-----
Conceptually, for each repetition, there are two steps to computing a k-fold error consistency
with holdout set:
(1) evaluation on standard k-fold ("validation" or "folding")
(2) evaluation on holdout set (outside of k-fold) ("testing")
There are a lot of overlapping terms and concepts here, so with analogy to deep learning, we
shall refer to step (1) as *validation* or *val* and step (2) as *testing* or *test*. This will
help keep variable names and function arguments sane and clear. We refer to the *entire* process
    of validation + testing as *evaluation*. Thus the .evaluate() method will have both validation
and testing steps, in this terminology.
Since validation is very much just standard k-fold, we also thus refer to validation steps as
*fold* steps. So for example validation or fold scores are the k accuracies on the non-training
partitions of each k-fold repetition (k*repetitions total), but test scores are the
    `repetitions` accuracies on the heldout test set.
"""
def __init__(
self,
model: Any,
x: ndarray,
y: ndarray,
n_splits: int,
model_args: Optional[Dict[str, Any]] = None,
fit_args: Optional[Dict[str, Any]] = None,
fit_args_x_y: Optional[Tuple[str, str]] = None,
predict_args: Optional[Dict[str, Any]] = None,
predict_args_x: Optional[str] = None,
stratify: bool = False,
x_sample_dim: int = 0,
y_sample_dim: int = 0,
empty_unions: UnionHandling = 0,
) -> None:
super().__init__(
model,
x,
y,
n_splits=n_splits,
model_args=model_args,
fit_args=fit_args,
fit_args_x_y=fit_args_x_y,
predict_args=predict_args,
predict_args_x=predict_args_x,
stratify=stratify,
x_sample_dim=x_sample_dim,
y_sample_dim=y_sample_dim,
empty_unions=empty_unions,
)
def evaluate(
self,
x_test: ndarray,
y_test: ndarray,
repetitions: int = 5,
save_test_accs: bool = True,
save_test_errors: bool = False,
save_test_predictions: bool = False,
save_fold_accs: bool = False,
save_fold_preds: bool = False,
save_fold_models: bool = False,
empty_unions: UnionHandling = 0,
show_progress: bool = True,
parallel_reps: Union[bool, int] = False,
loo_parallel: Union[bool, int] = False,
turbo: bool = False,
seed: int = None,
) -> ConsistencyResults:
"""Evaluate the error consistency of the classifier.
Parameters
----------
x_test: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing holdout predictor samples that the model will never be
            trained or fitted on. Must have a format identical to that of `x` passed into
constructor (see above).
y_test: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing holdout target values that the model will never be trained
            or fitted on. Must have a format identical to that of `y` passed into the constructor
(see above).
repetitions: int = 5
How many times to repeat the k-fold process. Yields `k*repetitions` error consistencies
            if both `x_test` and `y_test` are provided, and `repetitions*(repetitions - 1)/2`
consistencies otherwise. Note that if both `x_test` and `y_test` are not provided, then
setting repetitions to 1 will raise an error, since this results in insufficient arrays
to compare errors.
save_test_accs: bool = True
If True (default) also compute accuracy scores for each fold on `x_test` and save them
in `results.scores`. If False, skip this step. Setting to `False` is useful when
            prediction is expensive and/or you only care about evaluating the error consistency.
save_test_errors: bool = False
If True, save a list of the boolean error arrays (`y_pred_i != y_test` for fold `i`) for
all repetitions in `results.test_errors`. Total of `k * repetitions` values if k > 1.
If False (default), `results.test_errors` will be `None`.
save_test_predictions: bool = False
If True, save an array of the predictions `y_pred_i` for fold `i` for all repetitions in
`results.test_predictions`. Total of `k * repetitions` values if k > 1. If False
(default), `results.test_predictions` will be `None`.
save_fold_accs: bool = False
            If True, save an array of shape `(repetitions, k)` of the accuracies on the *fold* test
            sets for all repetitions in `results.fold_accs`.
save_fold_preds: bool = False
If True, save a NumPy array of shape `(repetitions, k, n_samples)` of the predictions on
the *fold* test set (`y_pred_fold_i` for fold `i`) for all repetitions in
`results.fold_predictions`.
save_fold_models: bool = False
If True, `results.fold_models` is a NumPy object array of size (repetitions, k) where
each entry (r, i) is the fitted model on repetition `r` fold `i`.
seed: int = None
Seed for reproducible results
Returns
-------
results: ConsistencyResults
An `error_consistency.containers.ConsistencyResults` object.
"""
self.x_test, self.y_test = self.save_x_y(x_test, y_test)[0:2]
seeds = random_seeds(seed, repetitions)
if self.stratify:
kfolds = [
StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=seed)
for seed in seeds
]
else:
kfolds = [
KFold(n_splits=self.n_splits, shuffle=True, random_state=seed) for seed in seeds
]
test_errors: List[ndarray] = []
test_accs: ndarray = []
test_predictions: ndarray = []
fold_accs: ndarray = []
fold_predictions: ndarray = []
fold_models: ndarray = []
idx = np.arange(0, int(self.x.shape[self.x_sample_dim]), dtype=int)
if self.y.ndim == 2:
y_split = np.argmax(self.y, axis=1 - np.abs(self.y_sample_dim)) # convert to labels
else:
y_split = self.y
if parallel_reps is False:
rep_desc, fold_desc = "K-fold Repetition {}", "Fold {}"
rep_pbar = tqdm(
total=repetitions, desc=rep_desc.format(0), leave=True, disable=not show_progress
)
for rep, kfold in enumerate(kfolds): # we have `repetitions` ("rep") kfold partitions
rep_pbar.set_description(rep_desc.format(rep))
fold_pbar = tqdm(
total=self.n_splits,
desc=fold_desc.format(0),
leave=False,
disable=not show_progress,
)
for k, (train_idx, test_idx) in enumerate(kfold.split(idx, y_split)):
fold_pbar.set_description(fold_desc.format(k))
results = self.validate_fold(train_idx, test_idx, save_fold_accs)
if save_fold_accs:
fold_accs.append(results.score)
if save_fold_preds:
fold_predictions.append(results.prediction)
if save_fold_models:
fold_models.append(results.fitted_model)
fitted = results.fitted_model
y_pred = fitted.predict(x_test)
y_err = get_y_error(y_pred, y_test, self.y_sample_dim)
test_predictions.append(y_pred)
if save_test_accs:
acc = 1 - np.mean(y_err)
test_accs.append(acc)
test_errors.append(y_err)
fold_pbar.update()
fold_pbar.close()
rep_pbar.update()
rep_pbar.close()
else:
if parallel_reps is True:
cpus = cpu_count()
else:
cpus = parallel_reps
rep_results = process_map(
validate_kfold_imap,
self.starmap_args(repetitions, save_fold_accs, seed),
max_workers=cpus,
desc="Repeating k-fold",
total=repetitions,
disable=not show_progress,
)
y_preds_list = process_map(
get_test_predictions,
[(rep_result, x_test) for rep_result in rep_results],
max_workers=cpus,
desc="Computing holdout predictions",
total=repetitions,
disable=not show_progress,
)
for results_list, y_preds in tqdm(
zip(rep_results, y_preds_list),
desc="Saving results",
total=repetitions,
disable=not show_progress,
):
for results, y_pred in zip(results_list, y_preds):
if save_fold_accs:
fold_accs.append(results.score)
if save_fold_preds:
fold_predictions.append(results.prediction)
if save_fold_models:
fold_models.append(results.fitted_model)
fitted = results.fitted_model
y_err = get_y_error(y_pred, y_test, self.y_sample_dim)
test_predictions.append(y_pred)
if save_test_accs:
acc = 1 - np.mean(y_err)
test_accs.append(acc)
test_errors.append(y_err)
errcon_results = error_consistencies(
y_preds=test_predictions,
y_true=y_test,
sample_dim=self.y_sample_dim,
empty_unions=empty_unions,
loo_parallel=loo_parallel,
turbo=turbo,
log_progress=show_progress,
)
consistencies, matrix, unpredictables, predictables, loo_consistencies = errcon_results
numerator = np.sum(unpredictables)
total = numerator / np.sum(predictables) if numerator > 0 else 0
return ConsistencyResults(
consistencies=consistencies,
matrix=matrix,
total_consistency=total,
leave_one_out_consistency=np.mean(loo_consistencies),
test_errors=test_errors if save_test_errors else None,
test_accs=np.array(test_accs) if save_test_accs else None,
test_predictions=test_predictions if save_test_predictions else None,
fold_accs=np.array(fold_accs) if save_fold_accs else None,
fold_predictions=fold_predictions if save_fold_preds else None,
fold_models=fold_models if save_fold_models else None,
)
def starmap_args(
self, repetitions: int, save_fold_accs: bool, seed: Optional[int]
) -> Generator[Tuple[Any, ...], None, None]:
rngs = parallel_seed_generators(seed, repetitions)
model = self.model_factory.create
for i in range(repetitions):
yield (
self.x,
self.y,
self.x_sample_dim,
self.y_sample_dim,
self.n_splits,
self.stratify,
[model() for _ in range(self.n_splits)],
rngs[i],
save_fold_accs,
)
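# Rough end-to-end sketch for the holdout class above (illustrative only; the data splits
# and argument values are assumptions rather than library defaults):
#
#     from sklearn.neighbors import KNeighborsClassifier as KNN
#     errcon = ErrorConsistencyKFoldHoldout(
#         model=KNN, x=x_train, y=y_train, n_splits=5, model_args=dict(n_neighbors=5)
#     )
#     results = errcon.evaluate(x_test, y_test, repetitions=10, seed=42)
#     print(results.total_consistency)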
class ErrorConsistencyKFoldInternal(ErrorConsistencyBase):
"""Compute error consistencies for a classifier.
Parameters
----------
model: Intersection[Callable, Type]
A *class* where instances are classifiers that implement:
1. A ``.fit`` or ``.train`` method that:
* accepts predictors and targets, plus `fit_args`, and
* updates the state of `model` when calling `.fit` or `.train`
2. A ``.predict`` or ``.test`` method, that:
* accepts testing samples, plus ``predict_args``, and
* requires having called ``.fit`` previously, and
* returns *only* the predictions as a single ArrayLike (e.g. NumPy array, List, pandas
DataFrame or Series)
.. _valid model example:
E.g.::
import numpy as np
from error_consistency import ErrorConsistency
        from sklearn.neighbors import KNeighborsClassifier as KNN
knn_args = dict(n_neighbors=5, n_jobs=1)
errcon = ErrorConsistency(model=KNN, model_args=knn_args)
# KNN is appropriate here because we could write e.g.
x = np.random.uniform(0, 1, size=[100, 5])
y = np.random.randint(0, 3, size=[100])
x_test = np.random.uniform(0, 1, size=[20, 5])
y_test = np.random.randint(0, 3, size=[20])
KNN.fit(x, y) # updates the state, no need to use a returned value
y_pred = KNN.predict(x_test) # returns a single object
x: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing predictor samples. Must be in a format that is consumable with
`model.fit(x, y, **model_args)` for arguments `model` and `model_args`. By default,
splitting of x into cross-validation subsets will be along the first axis (axis 0), that is,
the first axis is assumed to be the sample dimension. If your fit method requires a
different sample dimension, you can specify this in `x_sample_dim`.
y: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing targets. Must be in a format that is consumable with
`model.fit(x, y, **model_args)` for arguments `model` and `model_args`. By default,
splitting of y into cross-validation subsets will be along the first axis (axis 0), that is,
the first axis is assumed to be the sample dimension. If your fit method requires a
different sample dimension (e.g. y is a one-hot encoded array), you can specify this
in `y_sample_dim`.
n_splits: int = 5
How many folds to use, and thus models to generate, per repetition.
model_args: Optional[Dict[str, Any]]
Any arguments that are required each time to construct a fresh instance of the model (see
the `valid model example`_ above). Note that the data x and y must NOT be included here.
fit_args: Optional[Dict[str, Any]]
Any arguments that are required each time when calling the `.fit` or `.train` methods
internally (see the `valid model example`_ above). Note that the data x and y must NOT be
included here.
fit_args_x_y: Optional[Tuple[str, str]] = None
Name of the arguments which data `x` and target `y` are passed to. This is needed because
different libraries may have different conventions for how they expect predictors and
targets to be passed in to `fit` or `train`. For example, a function may have the
signature::
f(predictor: ndarray, target: ndarray) -> Any
To allow our internal `x_train` and `x_test` splits to be passed to the right arguments,
we thus need to know these names.
If None (default), it will be assumed that the `.fit` or `.train` method of the instance of
`model` takes x as its first positional argument, and `y` as its second, as in e.g.
`model.fit(x, y, **model_args)`.
If a tuple of strings (x_name, y_name), then a dict will be constructed internally by
splatting, e.g.::
args_dict = {**{x_name: x_train, y_name: y_train}, **model_args}
model.fit(**args_dict)
Alternately, see the documentation for `error_consistency.model.Model` for how to subclass
your own function here if you require more fine-grained control of how arguments are passed
into the fit and predict calls.
predict_args: Optional[Dict[str, Any]]
Any arguments that are required each time when calling the `.predict` or `.test` methods
internally (see the `valid model example`_ above). Note that the data x must NOT be included
here.
predict_args_x: Optional[str] = None
Name of the argument which data `x` is passed to during evaluation. This is needed because
different libraries may have different conventions for how they expect predictors and
targets to be passed in to `predict` or `test` calls.
If None (default), it will be assumed that the `.predict` or `.test` method of the instance
of `model` takes x as its first positional argument, as in e.g.
`model.predict(x, **predict_args)`.
If `predict_args_x` is a string, then a dict will be constructed internally with this
string, e.g.::
args_dict = {**{predict_args_x: x_train}, **model_args}
model.predict(**args_dict)
stratify: bool = False
If True, use sklearn.model_selection.StratifiedKFold during internal k-fold. Otherwise, use
sklearn.model_selection.KFold.
x_sample_dim: int = 0
The axis or dimension along which samples are indexed. Needed for splitting x into
partitions for k-fold.
y_sample_dim: int = 0
The axis or dimension along which samples are indexed. Needed for splitting y into
partitions for k-fold only if the target is e.g. one-hot encoded or dummy-coded.
empty_unions: UnionHandling = 0
When computing the pairwise consistency or leave-one-out consistency on small or
simple datasets, it can be the case that the union of the error sets is empty (e.g. if no
prediction errors are made). In this case the intersection over union is 0/0, which is
undefined.
* If `0` (default), the consistency for that collection of error sets is set to zero.
* If `1`, the consistency for that collection of error sets is set to one.
* If "nan", the consistency for that collection of error sets is set to `np.nan`.
* If "drop", the `consistencies` array will not include results for that collection,
but the consistency matrix will include `np.nans`.
* If "error", an empty union will cause a `ZeroDivisionError`.
* If "warn", an empty union will print a warning (probably a lot).
Notes
-----
Conceptually, for each repetition, there are two steps to computing a k-fold error consistency
with holdout set:
(1) evaluation on standard k-fold ("validation" or "folding")
(2) evaluation on holdout set (outside of k-fold) ("testing")
There are a lot of overlapping terms and concepts here, so with analogy to deep learning, we
shall refer to step (1) as *validation* or *val* and step (2) as *testing* or *test*. This will
help keep variable names and function arguments sane and clear. We refer to the *entire* process
    of validation + testing as *evaluation*. Thus the .evaluate() method will have both validation
and testing steps, in this terminology.
Since validation is very much just standard k-fold, we also thus refer to validation steps as
*fold* steps. So for example validation or fold scores are the k accuracies on the non-training
partitions of each k-fold repetition (k*repetitions total), but test scores are the
    `repetitions` accuracies on the heldout test set.
"""
def __init__(
self,
model: Any,
x: ndarray,
y: ndarray,
n_splits: int,
model_args: Optional[Dict[str, Any]] = None,
fit_args: Optional[Dict[str, Any]] = None,
fit_args_x_y: Optional[Tuple[str, str]] = None,
predict_args: Optional[Dict[str, Any]] = None,
predict_args_x: Optional[str] = None,
stratify: bool = True,
x_sample_dim: int = 0,
y_sample_dim: int = 0,
empty_unions: UnionHandling = 0,
) -> None:
super().__init__(
model,
x,
y,
n_splits=n_splits,
model_args=model_args,
fit_args=fit_args,
fit_args_x_y=fit_args_x_y,
predict_args=predict_args,
predict_args_x=predict_args_x,
stratify=stratify,
x_sample_dim=x_sample_dim,
y_sample_dim=y_sample_dim,
empty_unions=empty_unions,
)
def evaluate(
self,
repetitions: int = 5,
save_test_accs: bool = True,
save_test_errors: bool = False,
save_test_predictions: bool = False,
save_fold_accs: bool = False,
save_fold_preds: bool = False,
save_fold_models: bool = False,
show_progress: bool = True,
parallel_reps: Union[bool, int] = False,
loo_parallel: Union[bool, int] = False,
turbo: bool = False,
seed: int = None,
) -> ConsistencyResults:
"""Evaluate the error consistency of the classifier.
Parameters
----------
repetitions: int = 5
How many times to repeat the k-fold process. Yields `repetitions*(repititions - 1)/2`
consistencies if `repetitions` is greater than 1. Setting repetitions to 1 instead uses
            the entire set `X` for prediction for each fold, thus yielding `k*(k-1)/2` consistencies,
            which are strongly biased toward a value much lower than the true consistency.
Useful for quick checks / fast estimates of upper bounds on the error consistency, but
otherwise not recommended.
save_test_accs: bool = True
If True (default) also compute accuracy scores for each fold on `x_test` and save them
in `results.scores`. If False, skip this step. Setting to `False` is useful when
            prediction is expensive and/or you only care about evaluating the error consistency.
save_test_errors: bool = False
If True, save a list of the boolean error arrays (`y_pred_i != y_test` for fold `i`) for
all repetitions in `results.test_errors`. Total of `k * repetitions` values if k > 1.
If False (default), `results.test_errors` will be `None`.
save_test_predictions: bool = False
If True, save an array of the predictions `y_pred_i` for fold `i` for all repetitions in
`results.test_predictions`. Total of `k * repetitions` values if k > 1. If False
(default), `results.test_predictions` will be `None`.
save_fold_accs: bool = False
            If True, save an array of shape `(repetitions, k)` of the accuracies on the *fold* test
            sets for all repetitions in `results.fold_accs`.
save_fold_preds: bool = False
If True, save a NumPy array of shape `(repetitions, k, n_samples)` of the predictions on
the *fold* test set (`y_pred_fold_i` for fold `i`) for all repetitions in
`results.fold_predictions`.
save_fold_models: bool = False
If True, `results.fold_models` is a NumPy object array of size (repetitions, k) where
each entry (r, i) is the fitted model on repetition `r` fold `i`.
seed: int = None
Seed for reproducible results
Returns
-------
results: ConsistencyResults
The `error_consistency.containers.ConsistencyResults` object.
"""
seeds = random_seeds(seed, repetitions)
if self.stratify:
kfolds = [
StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=seed)
for seed in seeds
]
else:
kfolds = [
KFold(n_splits=self.n_splits, shuffle=True, random_state=seed) for seed in seeds
]
test_errors: List[ndarray] = []
test_accs: ndarray = []
test_predictions: ndarray = []
fold_accs: ndarray = []
fold_predictions: ndarray = []
fold_models: ndarray = []
idx = np.arange(0, int(self.x.shape[self.x_sample_dim]), dtype=int)
if self.y.ndim == 2:
y_split = np.argmax(self.y, axis=1 - np.abs(self.y_sample_dim)) # convert to labels
else:
y_split = self.y
if parallel_reps is False:
rep_desc, fold_desc = "K-fold Repetition {}", "Fold {}"
rep_pbar = tqdm(
total=repetitions, desc=rep_desc.format(0), leave=True, disable=not show_progress
)
for rep, kfold in enumerate(kfolds): # we have `repetitions` ("rep") kfold partitions
fold_combined_preds = np.full_like(y_split, -1)
rep_pbar.set_description(rep_desc.format(rep))
fold_pbar = tqdm(
total=self.n_splits,
desc=fold_desc.format(0),
leave=False,
disable=not show_progress,
)
for k, (train_idx, test_idx) in enumerate(kfold.split(idx, y_split)):
fold_pbar.set_description(fold_desc.format(k))
# We have `save_fold_accs=True` below because we need to run the predictions
# to assemble the piecewise predictions, regardless of whether or not we save
# the piecewise predictions individually later
results = self.validate_fold(train_idx, test_idx, save_fold_accs=True)
y_pred = results.prediction
fold_combined_preds[test_idx] = y_pred
if save_fold_accs:
fold_accs.append(results.score)
if save_fold_preds:
fold_predictions.append(y_pred)
if save_fold_models:
fold_models.append(results.fitted_model)
fold_pbar.update()
fold_pbar.close()
y_pred = fold_combined_preds
y_err = get_y_error(y_pred, y_split, self.y_sample_dim)
acc = 1 - np.mean(y_err)
test_predictions.append(y_pred)
test_accs.append(acc)
test_errors.append(y_err)
rep_pbar.update()
rep_pbar.close()
else:
if parallel_reps is True:
cpus = cpu_count()
else:
cpus = parallel_reps
rep_results: List[List[KFoldResults]] = process_map(
validate_kfold_imap,
# We have `save_fold_accs=True` (repetitions, True, seed) below because we need to
# run the predictions to assemble the piecewise predictions, regardless of whether
# or not we save the piecewise predictions individually later
self.starmap_args(repetitions, True, seed),
max_workers=cpus,
desc="Repeating k-fold",
total=repetitions,
disable=not show_progress,
)
results_list: List[KFoldResults]
for results_list in tqdm(
rep_results, desc="Saving results", total=repetitions, disable=not show_progress
):
fold_combined_preds = np.full_like(y_split, -1)
for results in results_list:
y_pred = results.prediction
test_idx = results.test_idx
fold_combined_preds[test_idx] = y_pred
if save_fold_accs:
fold_accs.append(results.score)
if save_fold_preds:
fold_predictions.append(y_pred)
if save_fold_models:
fold_models.append(results.fitted_model)
y_err = get_y_error(fold_combined_preds, y_split, self.y_sample_dim)
acc = 1 - np.mean(y_err)
test_predictions.append(fold_combined_preds)
test_accs.append(acc)
test_errors.append(y_err)
errcon_results = error_consistencies(
y_preds=test_predictions,
y_true=y_split,
sample_dim=self.y_sample_dim,
empty_unions=self.empty_unions,
loo_parallel=loo_parallel,
turbo=turbo,
log_progress=show_progress,
)
consistencies, matrix, unpredictables, predictables, loo_consistencies = errcon_results
numerator = np.sum(unpredictables)
total = numerator / np.sum(predictables) if numerator > 0 else 0
if len(loo_consistencies) == 0:
loo_consistencies = np.nan
return ConsistencyResults(
consistencies=consistencies,
matrix=matrix,
total_consistency=total,
leave_one_out_consistency=np.mean(loo_consistencies),
test_errors=test_errors if save_test_errors else None,
test_accs=np.array(test_accs) if save_test_accs else None,
test_predictions=test_predictions if save_test_predictions else None,
fold_accs=np.array(fold_accs) if save_fold_accs else None,
fold_predictions=fold_predictions if save_fold_preds else None,
fold_models=fold_models if save_fold_models else None,
)
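# Rough end-to-end sketch for the internal variant above (illustrative; names and values
# are assumptions): no holdout set is required, so consistencies are computed between the
# per-repetition predictions reassembled from the k folds.
#
#     from sklearn.neighbors import KNeighborsClassifier as KNN
#     errcon = ErrorConsistencyKFoldInternal(
#         model=KNN, x=x, y=y, n_splits=5, model_args=dict(n_neighbors=5)
#     )
#     results = errcon.evaluate(repetitions=10, parallel_reps=True, seed=42)
#     print(results.leave_one_out_consistency)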
class ErrorConsistencyMonteCarlo:
"""Calculate error consistency using repeated random train/test splits."""
class ErrorConsistency(ErrorConsistencyBase):
"""Compute the error consistency of a classifier.
Parameters
----------
model: Intersection[Callable, Type]
A *class* where instances are classifiers that implement:
1. A ``.fit`` or ``.train`` method that:
#. accepts predictors and targets, plus `fit_args`, and
#. updates the state of `model` when calling `.fit` or `.train`
2. A ``.predict`` or ``.test`` method, that:
#. accepts testing samples, plus ``predict_args``, and
#. requires having called ``.fit`` previously, and
#. returns *only* the predictions as a single ArrayLike (e.g. NumPy array, List, pandas
DataFrame or Series)
E.g.::
import numpy as np
from error_consistency import ErrorConsistency
            from sklearn.neighbors import KNeighborsClassifier as KNN
knn_args = dict(n_neighbors=5, n_jobs=1)
errcon = ErrorConsistency(model=KNN, model_args=knn_args)
# KNN is appropriate here because we could write e.g.
x = np.random.uniform(0, 1, size=[100, 5])
y = np.random.randint(0, 3, size=[100])
x_test = np.random.uniform(0, 1, size=[20, 5])
y_test = np.random.randint(0, 3, size=[20])
KNN.fit(x, y) # updates the state, no need to use a returned value
y_pred = KNN.predict(x_test) # returns a single object
x: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing predictor samples. Must be in a format that is consumable with
`model.fit(x, y, **model_args)` for arguments `model` and `model_args`. If using external
validation (e.g. passing `x_test` into `ErrorConsistency.evaluate`), you must ensure `x`
does not contain `x_test`, that is, this argument functions as if it is `x_train`.
Otherwise, if using internal validation, splitting of x into validation subsets will be
along the first axis (axis 0), that is, the first axis is assumed to be the sample
dimension. If your fit method requires a different sample dimension, you can specify this
in `x_sample_dim`.
y: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing targets. Must be in a format that is consumable with
`model.fit(x, y, **model_args)` for arguments `model` and `model_args`. If using external
validation (e.g. passing `x_test` into `ErrorConsistency.evaluate`), you must ensure `x`
does not contain `x_test`, that is, this argument functions as if it is `x_train`.
Otherwise, if using internal validation, splitting of y into validation subsets will be
along the first axis (axis 0), that is, the first axis is assumed to be the sample
dimension. If your fit method requires a different sample dimension (e.g. y is a one-hot
encoded array), you can specify this in `y_sample_dim`.
n_splits: int = 5
How many folds to use for validating error consistency. Only relevant
model_args: Optional[Dict[str, Any]]
Any arguments that are required each time to construct a fresh instance of the model (see
above). Note that the data x and y must NOT be included here.
fit_args: Optional[Dict[str, Any]]
Any arguments that are required each time when calling the `.fit` or `.train` methods
internally (see notes for `model` above). Note that the data x and y must NOT be included
here.
fit_args_x_y: Optional[Tuple[str, str]] = None
Name of the arguments which data `x` and target `y` are passed to. This is needed because
different libraries may have different conventions for how they expect predictors and
targets to be passed in to `fit` or `train`.
If None (default), it will be assumed that the `.fit` or `.train` method of the instance of
`model` takes x as its first positional argument, and `y` as its second, as in e.g.
`model.fit(x, y, **model_args)`.
If a tuple of strings (x_name, y_name), then a dict will be constructed internally by
splatting, e.g.
args_dict = {**{x_name: x_train, y_name: y_train}, **model_args}
model.fit(**args_dict)
predict_args: Optional[Dict[str, Any]]
Any arguments that are required each time when calling the `.predict` or `.test` methods
internally (see notes for `model` above). Note that the data x must NOT be included here.
predict_args_x: Optional[str] = None
Name of the argument which data `x` is passed to during evaluation. This is needed because
different libraries may have different conventions for how they expect predictors and
targets to be passed in to `predict` or `test` calls.
If None (default), it will be assumed that the `.predict` or `.test` method of the instance
of `model` takes x as its first positional argument, as in e.g.
`model.predict(x, **predict_args)`.
If `predict_args_x` is a string, then a dict will be constructed internally with this
string, e.g.
args_dict = {**{predict_args_x: x_train}, **model_args}
model.predict(**args_dict)
stratify: bool = False
If True, use sklearn.model_selection.StratifiedKFold during internal k-fold. Otherwise, use
sklearn.model_selection.KFold.
x_sample_dim: int = 0
The axis or dimension along which samples are indexed. Needed for splitting x into
partitions for k-fold.
y_sample_dim: int = 0
The axis or dimension along which samples are indexed. Needed for splitting y into
partitions for k-fold only if the target is e.g. one-hot encoded or dummy-coded.
empty_unions: UnionHandling = 0
When computing the pairwise consistency or leave-one-out consistency on small or
simple datasets, it can be the case that the union of the error sets is empty (e.g. if no
prediction errors are made). In this case the intersection over union is 0/0, which is
undefined.
* If `0` (default), the consistency for that collection of error sets is set to zero.
* If `1`, the consistency for that collection of error sets is set to one.
* If "nan", the consistency for that collection of error sets is set to `np.nan`.
* If "drop", the `consistencies` array will not include results for that collection,
but the consistency matrix will include `np.nans`.
* If "error", an empty union will cause a `ZeroDivisionError`.
* If "warn", an empty union will print a warning (probably a lot).
onehot_y: bool = True
Only relevant for two-dimensional `y`. Set to True if `y` is a one-hot array with samples
indexed by `y_sample_dim`. Set to False if `y` is dummy-coded.
Notes
-----
Conceptually, for each repetition, there are two steps to computing a k-fold error consistency
with holdout set:
(1) evaluation on standard k-fold ("validation" or "folding")
(2) evaluation on holdout set (outside of k-fold) ("testing")
There are a lot of overlapping terms and concepts here, so with analogy to deep learning, we
shall refer to step (1) as *validation* or *val* and step (2) as *testing* or *test*. This will
help keep variable names and function arguments sane and clear. We refer to the *entire* process
    of validation + testing as *evaluation*. Thus the .evaluate() method will have both validation
and testing steps, in this terminology.
Since validation is very much just standard k-fold, we also thus refer to validation steps as
*fold* steps. So for example validation or fold scores are the k accuracies on the non-training
partitions of each k-fold repetition (k*repetitions total), but test scores are the
    `repetitions` accuracies on the heldout test set.
The good thing is that standard k-fold is standard k-fold no matter how we implement
error-consistency (e.g. with holdout, Monte-Carlo style subsetting, etc). We just have train and
(fold) test indices, and do the usual fit calls and etc. So this can be abstracted to the base
error consistency class.
:meta public:
"""
def __init__(
self,
model: Any,
x: ndarray,
y: ndarray,
n_splits: int,
model_args: Optional[Dict[str, Any]] = None,
fit_args: Optional[Dict[str, Any]] = None,
fit_args_x_y: Optional[Tuple[str, str]] = None,
predict_args: Optional[Dict[str, Any]] = None,
predict_args_x: Optional[str] = None,
stratify: bool = False,
x_sample_dim: int = 0,
y_sample_dim: int = 0,
empty_unions: UnionHandling = 0,
onehot_y: bool = True,
) -> None:
super().__init__(
model,
x,
y,
n_splits=n_splits,
model_args=model_args,
fit_args=fit_args,
fit_args_x_y=fit_args_x_y,
predict_args=predict_args,
predict_args_x=predict_args_x,
stratify=stratify,
x_sample_dim=x_sample_dim,
y_sample_dim=y_sample_dim,
empty_unions=empty_unions,
)
def evaluate(
self,
repetitions: int = 5,
x_test: ndarray = None,
y_test: ndarray = None,
save_test_accs: bool = True,
save_test_errors: bool = False,
save_test_predictions: bool = False,
save_fold_accs: bool = False,
save_fold_preds: bool = False,
save_fold_models: bool = False,
empty_unions: UnionHandling = 0,
show_progress: bool = True,
parallel_reps: bool = False,
loo_parallel: bool = False,
turbo: bool = False,
seed: int = None,
) -> ConsistencyResults:
"""Evaluate the error consistency of the classifier.
Parameters
----------
repetitions: int = 5
How many times to repeat the k-fold process. Yields `k*repetitions` error consistencies
            if both `x_test` and `y_test` are provided, and `repetitions*(repetitions - 1)/2`
consistencies otherwise. Note that if both `x_test` and `y_test` are not provided, then
setting repetitions to 1 will raise an error, since this results in insufficient arrays
to compare errors.
x_test: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing holdout predictor samples that the model will never be
            trained or fitted on. Must have a format identical to that of `x` passed into
constructor (see above).
y_test: Union[List, pandas.DataFrame, pandas.Series, numpy.ndarray]
ArrayLike object containing holdout target values that the model will never be trained
            or fitted on. Must have a format identical to that of `y` passed into the constructor
(see above).
save_test_accs: bool = True
If True (default) also compute accuracy scores and save them in the returned
`results.scores`. If False, skip this step.
Note: when `x_test` and `y_test` are provided, test accuracies are over these values.
When not provided, test accuracies are over the entire set `y` passed into the
`ErrorConsistency` constructor, but constructed from each fold (e.g. if there are `k`
splits, the predictions on the k disjoint folds are joined together to get one total
set of predictions for that repetition).
save_test_errors: bool = False
If True, save a list of the boolean error arrays (`y_pred != y_test`) for all
repetitions. If False (default), the return value `results` will have
`results.test_errors` be `None`.
Note: when `x_test` and `y_test` are provided, errors are on `y_test`.
When not provided, test accuracies are over the entire set `y` passed into the
`ErrorConsistency` constructor, but constructed from each fold (e.g. if there are `k`
splits, the predictions on the k disjoint folds are joined together to get one total
set of predictions for that repetition).
save_test_predictions: bool = False
If True, save an array of the predictions `y_pred_i` for fold `i` for all repetitions in
`results.test_predictions`. Total of `k * repetitions` values if k > 1. If False
(default), `results.test_predictions` will be `None`.
Note: when `x_test` and `y_test` are provided, predictions are for `y_test`.
When not provided, predictions are for the entire set `y` passed into the
`error_consistency.consistency.ErrorConsistency` constructor, but constructed from the
models trained on each disjoint fold (e.g. if there are `k` splits, the predictions on
the `k` disjoint folds are joined together to get one total set of predictions for that
repetition). That is, the predictions are the combined results of `k` different models.
save_fold_accs: bool = False
If True, save a list of shape `(repetitions, k)` of the accuracies on the *fold* test
sets for all repetitions. This list will be available in `results.fold_accs`. If False,
do not save these values.
Note: when `x_test` and `y_test` are provided, and `save_fold_accs=False` and
`save_fold_preds=False`, then the entire prediction and accuracy evaluation on each
k-fold will be skipped, potentially saving significant compute time, depending on the
model and size of the dataset. However, when using an internal validation method
(`x_test` and `y_test` are not provided) this prediction step still must be executed.
save_fold_preds: bool = False
If True, save a list with shape `(repetitions, k, n_samples)` of the predictions on
the *fold* test set for all repetitions. This list will be available in
`results.fold_predictions`. If False, do not save these values. See Notes above for
extra details on this behaviour.
save_fold_models: bool = False
If True, `results.fold_models` is a nested list of size (repetitions, k) where
each entry (r, i) is the *fitted* model on repetition `r` fold `i`.
Note: During parallelization, new models are constructed each time using the passed in
`model` class and the model arguments. Parallelization pickles these models and the
associated data, and then the actual models are fit in each separate process. When
there is no parallelization, the procedure is still similar, in that separate models
are created for every repetition. Thus, you have to be careful about memory when using
`save_fold_models` and a large number of repetitions. The `error-consistency` library
wraps all `model` classes passed in into a `Model` class which is used internally to
unify interfacing across various libraries. This `Model` class is very tiny, and is not
a concern for memory, but if the wrapped model is large, you may have memory problems.
E.g. KNN and other memory-based methods which may have an option `save_x_y` or the like
could lead to problems when using `save_fold_models=True`.
seed: int = None
Seed for reproducible results.
Returns
-------
results: ConsistencyResults
An `error_consistency.containers.ConsistencyResults` object.
"""
if (x_test, y_test) == (None, None):
self.consistency_class: Type[ErrorConsistencyBase] = ErrorConsistencyKFoldHoldout
elif (x_test is not None) and (y_test is not None):
self.consistency_class = ErrorConsistencyKFoldInternal
else:
raise ValueError(
"If providing external holdout data, *both* `x_test` and `y_test` must be provided."
)
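# --- Added usage sketch (comments only; not part of the original source) ---
# A minimal call of the evaluate() API documented above, assuming the public class
# is exposed as error_consistency.consistency.ErrorConsistency (the path the
# docstring itself references) and that any sklearn-style classifier class can be
# passed as `model`; exact argument names follow the __init__ signature shown here:
#
# from sklearn.neighbors import KNeighborsClassifier
# from error_consistency.consistency import ErrorConsistency
#
# errcon = ErrorConsistency(model=KNeighborsClassifier, x=X, y=y, n_splits=5)
# results = errcon.evaluate(repetitions=10, save_test_accs=True)
# print(results.scores)  # per-repetition test accuracies, per the docstring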
|
py | b40a57a0dd4f99607465ae6a3ac8411123f4ef40 | # scoping.level.1.py
def my_function():
test = 1 # this is defined in the local scope of the function
print('my_function:', test)
test = 0 # this is defined in the global scope
my_function()
print('global:', test)
|
py | b40a5872bb6f042332144832da236091c6f02c98 | import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from authentication import ws
from azureml.core import Dataset
from datetime import datetime
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.datadrift import DataDriftDetector, AlertConfiguration
from azureml.data import DataType
# Load data, and register target
def data_load_to_blob(input_dataset=None, blob_header=None):
dstore = ws.get_default_datastore()
dstore.upload(input_dataset, blob_header, overwrite=True, show_progress=True)
print(f'Data load complete.')
def create_baseline_and_target(blob_header=None, target_name=None, baseline_name=None):
dstore = ws.get_default_datastore()
target_datapath = [(dstore, blob_header + '/2020/*/*/data.parquet')]
baseline_datapath = [(dstore, blob_header + '/2019/*/*/data.parquet')]
partition_format = blob_header + '/{partition_time:yyyy/mm/dd}/data.parquet'
# Create target dataset
target_tabular = Dataset.Tabular.from_parquet_files(path=target_datapath, partition_format = partition_format)
tsd = target_tabular.with_timestamp_columns('partition_time')
target = tsd.register(ws, target_name, create_new_version=True,
description='Data for Tabular Dataset with time series',
tags={'type':'TabularDataset'}
)
# Create baseline dataset
baseline_tabular = Dataset.Tabular.from_parquet_files(path = baseline_datapath, partition_format = partition_format)
bsd = baseline_tabular.with_timestamp_columns('partition_time')
baseline = bsd.register(ws, baseline_name, create_new_version=True,
description='Data for Tabular Dataset with time series',
tags={'type':'TabularDataset'}
)
return target, baseline
def select_features(tabular_dataset=None):
columns = list(tabular_dataset.take(1).to_pandas_dataframe())
exclude = ['year', 'day', 'version', '__index_level_0__', 'usaf', 'wban']
features = [col for col in columns if col not in exclude]
return features
# Get dataset monitor
def get_dataset_monitor(ws=None, name=None, baseline=None, target=None, compute_target=None, features=None):
dataset_monitor_name = name
try:
monitor = DataDriftDetector.get_by_name(ws, dataset_monitor_name)
print(f'Found the dataset monitor called: {dataset_monitor_name}')
except:
alert_config = AlertConfiguration(['[email protected]']) # replace with your email to receive alerts from the scheduled pipeline after enabling
monitor = DataDriftDetector.create_from_datasets(
ws, dataset_monitor_name, baseline, target,
compute_target=compute_target,
frequency='Week',# how often to analyze target data
feature_list=features,
drift_threshold=None,# threshold from 0 to 1 for email alerting
latency=0,# SLA in hours for target data to arrive in the dataset
alert_config=alert_config)
print(f'Created the dataset monitor called {dataset_monitor_name}')
return monitor
def trigger_run(monitor=None):
## update the feature list
#monitor = monitor.update(feature_list=features)
# Trigger run for backfill for one month
start_date = datetime(2020, 1, 1)
end_date = datetime(2020, 3, 31)
backfill = monitor.backfill(start_date, end_date)
# make sure the backfill has completed
backfill.wait_for_completion(wait_post_processing=True)
if __name__ == "__main__":
blob_header='SEATTLE'
data_load_to_blob(input_dataset='./seattle-weather-data', blob_header=blob_header)
target, baseline = create_baseline_and_target(
blob_header=blob_header,
target_name='seattle-weather-target',
baseline_name='seattle-weather-baseline'
)
features = select_features(tabular_dataset=target)
monitor = get_dataset_monitor(ws=ws,
name='seattle-weather-datadrift',
baseline=baseline,
target=target,
compute_target='gpu-cluster',
features=features
)
trigger_run(monitor=monitor)
|
py | b40a59ba4026225d797cb73f9048b6f1df96824d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Merge CSV Files.
This script reads contents from different CSV files and merges them into one CSV
file.
Revision History:
2020-07-12 (Animesh): Baseline Software.
Example:
$ python folder_to_csv.py
"""
#___Import Modules:
import pandas as pd
from sklearn.utils import shuffle
#___Global Variables:
# FILE = ["data/lists/NORMAL/train.csv",
# "data/lists/PNEUMONIA/train.csv"]
# OFILE = "data/train.csv"
# FILE = ["data/lists/NORMAL/val.csv",
# "data/lists/PNEUMONIA/val.csv"]
# OFILE = "data/val.csv"
# FILE = ["data/lists/NORMAL/train.csv",
# "data/lists/Augmented/train.csv",
# "data/lists/PNEUMONIA/train.csv"]
# OFILE = "data/train_aug.csv"
# FILE = ["data/lists/NORMAL/val.csv",
# "data/lists/Augmented/val.csv",
# "data/lists/PNEUMONIA/val.csv"]
# OFILE = "data/val_aug.csv"
FILE = ["data/lists/NORMAL/test.csv",
"data/lists/PNEUMONIA/test.csv"]
OFILE = "data/test.csv"
SEED = 717
#___Main Method:
def main():
"""This is the Main Method.
This method reads the input CSV files, merges and shuffles their contents, and writes the result to the output CSV file.
"""
# read contents from CSV files
content = []
for ifile in FILE:
content.extend([pd.read_csv(ifile)])
# combine contents in one list
df = content[0]
for frame in content[1:]:
df = df.append(frame)
# shuffle contents randomly and write in a CSV file
df = shuffle(df, random_state=SEED)
df.to_csv(OFILE, index=False)
return None
#___Driver Program:
if __name__ == "__main__":
main()
#
# end of file
"""ANI717""" |
py | b40a5b2d09f2f5d16140f4e1d51d43fc325f14a3 | from flask import render_template, redirect, url_for, flash, request
from flask_login import current_user
import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField
from flask_babel import _, lazy_gettext as _l
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from .models.product import Product
from .models.purchase import Purchase, ItemizedPurchase
from flask import Blueprint
bp = Blueprint('sellerhistory', __name__)
class FulfillForm(FlaskForm):
submit = SubmitField(_l('Change Quantity'))
# load the seller's history
@bp.route('/sellerhistory', methods=['GET', 'POST'])
def sellerhistory():
form = FulfillForm()
# fulfill an order line
if(form.validate_on_submit()):
ItemizedPurchase.fulfill(request.values.get('order_id'), request.values.get(
'product_id'), request.values.get('seller_id'))
return redirect(url_for('sellerhistory.sellerhistory'))
# find the user's inventory:
if current_user.is_authenticated:
orders = Purchase.get_all()
else:
orders = None
# prepare the frontend to display aggregated information
itemOrders = []
for order in orders:
ip = ItemizedPurchase.get(order.order_id, current_user.id)
if (len(ip) > 0):
sum = 0
total = 0
fulfilled = 'FULFILLED'
for p in ip:
sum += p.quantity
total += p.price * p.quantity
if p.fullfilled == 0:
fulfilled = 'UNFULFILLED'
ip.append(total)
ip.append(fulfilled)
ip.append(sum)
itemOrders.append(ip)
# render the page by adding information to the index.html file
return render_template('sellerhistory.html',
itemOrders=itemOrders, form=form)
|
py | b40a5c2a3d6fb24f4709bbd5ddbf65160451c281 | # Generated by Django 3.0.5 on 2020-08-25 08:56
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0023_add_referral_answer_attachment_with_base_class"),
]
operations = [
migrations.CreateModel(
name="ReferralUrgency",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"duration",
models.DurationField(
help_text="Expected treatment duration", verbose_name="duration"
),
),
(
"is_default",
models.BooleanField(
default=False,
help_text="Whether this urgency level is the default level for new referrals",
verbose_name="is default",
),
),
("name", models.CharField(max_length=200, verbose_name="name")),
(
"requires_justification",
models.BooleanField(
help_text="Whether to require a justification when this urgency is selected",
verbose_name="requires justification",
),
),
],
options={
"verbose_name": "referral urgency",
"db_table": "partaj_referral_urgency",
},
),
migrations.AddField(
model_name="referral",
name="urgency_level",
field=models.ForeignKey(
blank=True,
help_text="Urgency level. When is the referral answer needed?",
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="core.ReferralUrgency",
verbose_name="urgency",
),
),
]
|
py | b40a5c6e9ee3c547059ec514e3a3f7c5d1eb0d1c | import pytest
from fastapi import status
from httpx import AsyncClient
from sqlalchemy.ext.asyncio import AsyncSession
from app import models
from app.core.config import settings
from app.tests.utils.auth import (
get_expired_user_token_headers,
get_not_active_user_token_headers,
get_user_token_headers,
)
from app.tests.utils.purchase import create_random_purchase_in_db
# region get purchases - GET /purchases/
@pytest.mark.asyncio
async def test_get_purchases_must_accept_get_verb(
async_client: AsyncClient, random_purchase: models.Purchase
) -> None:
headers = get_user_token_headers(random_purchase.user_)
response = await async_client.get(
url=f"{settings.API_V1_STR}/purchases/", headers=headers
)
assert response.status_code != status.HTTP_405_METHOD_NOT_ALLOWED
@pytest.mark.asyncio
async def test_when_successfully_get_purchases_must_return_200(
async_client: AsyncClient, random_purchase: models.Purchase
) -> None:
headers = get_user_token_headers(random_purchase.user_)
response = await async_client.get(
f"{settings.API_V1_STR}/purchases/", headers=headers
)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.asyncio
async def test_when_successfully_get_purchases_must_return_a_list(
async_client: AsyncClient, random_purchase: models.Purchase
) -> None:
headers = get_user_token_headers(random_purchase.user_)
response = await async_client.get(
f"{settings.API_V1_STR}/purchases/", headers=headers
)
assert isinstance(response.json(), list)
@pytest.mark.asyncio
async def test_when_successfully_get_purchases_it_must_be_returned(
async_client: AsyncClient, random_purchase: models.Purchase
) -> None:
headers = get_user_token_headers(random_purchase.user_)
response = await async_client.get(
f"{settings.API_V1_STR}/purchases/", headers=headers
)
assert response.json()[0].get("id") == random_purchase.id
@pytest.mark.asyncio
async def test_when_getting_purchases_if_limit_is_passed_must_return_the_correct_quantity_of_purchases(
random_user: models.User, async_client: AsyncClient, db: AsyncSession
) -> None:
for _ in range(3):
await create_random_purchase_in_db(db=db, user=random_user)
headers = get_user_token_headers(random_user)
response = await async_client.get(
f"{settings.API_V1_STR}/purchases/?limit=2", headers=headers
)
assert len(response.json()) == 2
@pytest.mark.asyncio
async def test_when_getting_purchases_if_skip_is_passed_must_skip_the_correct_quantity_of_purchases(
random_user: models.User, async_client: AsyncClient, db: AsyncSession
) -> None:
purchases = []
for _ in range(3):
purchases.append(
await create_random_purchase_in_db(db=db, user=random_user)
)
headers = get_user_token_headers(random_user)
response = await async_client.get(
f"{settings.API_V1_STR}/purchases/?skip=2&limit=2", headers=headers
)
assert response.json()[0].get("id") == purchases[2].id
@pytest.mark.asyncio
async def test_when_getting_purchases_if_token_user_is_not_authenticated_must_return_401(
async_client: AsyncClient,
) -> None:
response = await async_client.get(f"{settings.API_V1_STR}/purchases/")
assert response.status_code == status.HTTP_401_UNAUTHORIZED
@pytest.mark.asyncio
async def test_when_getting_purchases_if_token_is_expired_must_return_403(
async_client: AsyncClient, random_purchase: models.Purchase
) -> None:
headers = get_expired_user_token_headers(random_purchase.user_)
response = await async_client.get(
f"{settings.API_V1_STR}/purchases/", headers=headers
)
assert response.status_code == status.HTTP_403_FORBIDDEN
@pytest.mark.asyncio
async def test_when_getting_purchases_if_token_is_not_active_yet_must_return_403(
async_client: AsyncClient, random_purchase: models.Purchase
) -> None:
headers = get_not_active_user_token_headers(random_purchase.user_)
response = await async_client.get(
f"{settings.API_V1_STR}/purchases/", headers=headers
)
assert response.status_code == status.HTTP_403_FORBIDDEN
# endregions
|
py | b40a5cbcd94126b96e67a287dc968a3a495ef874 | import asyncio
import traceback
from datetime import datetime
import coc
from aiohttp import ClientConnectionError
async def update_trophy_change_analysis(daemon, sleep_time: int):
old_leaderboard = await daemon.coc_client.get_location_players_versus(limit=200)
old_tags = set(player.tag for player in old_leaderboard)
old_trophies_dict = {player.tag: player.versus_trophies for player in old_leaderboard}
old_places_dict = {player.tag: player.rank for player in old_leaderboard}
duel_count = 0
last_minute = datetime.now().minute
while True:
try:
trophy_changes = {}
try:
new_leaderboard = await daemon.coc_client.get_location_players_versus(limit=200)
if new_leaderboard == old_leaderboard:
raise ClientConnectionError
except ClientConnectionError:
await asyncio.sleep(sleep_time)
continue
except coc.Maintenance:
await asyncio.sleep(sleep_time)
continue
new_tags = set(player.tag for player in new_leaderboard)
new_trophies_dict = {player.tag: player.versus_trophies for player in new_leaderboard}
new_places_dict = {player.tag: player.rank for player in new_leaderboard}
for common_tag in old_tags & new_tags:
trophy_change = abs(new_trophies_dict[common_tag] - old_trophies_dict[common_tag])
if trophy_change:
if trophy_change not in trophy_changes:
trophy_changes[trophy_change] = []
trophy_changes[trophy_change].append(common_tag)
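# Added explanatory comment: when exactly two players on the leaderboard show the
# same absolute trophy change in this interval, the code assumes they fought each
# other, so each such pair is counted as one duel below.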
duel_count += len([change for change, tags in trophy_changes.items() if len(tags) == 2])
async with daemon.pool.acquire() as conn:
await conn.executemany('''
INSERT INTO TrophyChangeAnalysis
(time, previous_places, previous_trophies, trophy_change)
VALUES($1, $2, $3, $4)
''',
[[datetime.now(), [old_places_dict[tag] for tag in tags],
[old_trophies_dict[tag] for tag in tags], change]
for change, tags in trophy_changes.items() if len(tags) == 2])
if last_minute > datetime.now().minute:
await conn.execute('''
INSERT INTO TopLadderActivity(time, duels)
Values($1, $2)
''',
datetime.now(), duel_count)
duel_count = 0
old_leaderboard = new_leaderboard
old_tags = new_tags
old_trophies_dict = new_trophies_dict
old_places_dict = new_places_dict
last_minute = datetime.now().minute
except Exception as error:
daemon.logger.critical(error, exc_info=True)
exc = ''.join(traceback.format_exception(type(error), error, error.__traceback__, chain=True))
print(exc)
await asyncio.sleep(sleep_time)
|
py | b40a5cd9889d9e265e673a6f43b23afa2ea33700 | import sys
import typing
import bpy_types
class ConsoleAutocomplete(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class ConsoleBanner(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class ConsoleCopyAsScript(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class ConsoleExec(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class ConsoleLanguage(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
|
py | b40a5cfdd581b95c6f8804e177a59c9654e05536 | from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from keras.models import Sequential
from keras.callbacks import TensorBoard
import keras as kr
from keras.models import load_model
name = '3x_cmd'
log = 'tb/' + name
mdl = 'models/' + name + '.h5'
bs = 100
num_classes = 10
epochs = 20
tb = TensorBoard(log_dir=log, histogram_freq=0, batch_size=bs, write_graph=True, write_grads=False, embeddings_freq=0, update_freq='epoch')
# input image dimensions
img_x, img_y = 28, 28
# load the MNIST data set, which already splits into train and test sets for us
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape the data into a 4D tensor - (sample_number, x_img_size, y_img_size, num_channels)
# because the MNIST is greyscale, we only have a single channel - RGB colour images would have 3
x_train = x_train.reshape(x_train.shape[0], img_x, img_y, 1)
x_test = x_test.reshape(x_test.shape[0], img_x, img_y, 1)
input_shape = (img_x, img_y, 1)
# convert the data to the right type
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices - this is for use in the
# categorical_crossentropy loss below
y_train = kr.utils.to_categorical(y_train, num_classes)
y_test = kr.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(16, (3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(16, (3, 3), activation='relu'))
model.add(Conv2D(16, (3, 3), activation='relu'))
model.add( Dropout(0.2))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))
opt = kr.optimizers.Adadelta()
model.compile( loss=kr.losses.categorical_crossentropy, optimizer=opt, metrics=['accuracy'] )
model.fit(x_train, y_train, batch_size=bs, epochs=epochs, verbose=1, validation_data=(x_test, y_test), callbacks=[tb] )
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save(mdl)
|
py | b40a5da6ea99ea6451cba38d6c374deb533bb225 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import os
import sys
import debugpy
from debugpy import launcher
from debugpy.common import compat, json
from debugpy.common.compat import unicode
from debugpy.launcher import debuggee
def launch_request(request):
debug_options = set(request("debugOptions", json.array(unicode)))
# Handling of properties that can also be specified as legacy "debugOptions" flags.
# If property is explicitly set to false, but the flag is in "debugOptions", treat
# it as an error. Returns None if the property wasn't explicitly set either way.
def property_or_debug_option(prop_name, flag_name):
assert prop_name[0].islower() and flag_name[0].isupper()
value = request(prop_name, bool, optional=True)
if value == ():
value = None
if flag_name in debug_options:
if value is False:
raise request.isnt_valid(
'{0!j}:false and "debugOptions":[{1!j}] are mutually exclusive',
prop_name,
flag_name,
)
value = True
return value
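# Added illustrative note: e.g. property_or_debug_option("redirectOutput", "RedirectOutput")
# returns True when either the "redirectOutput" property is true or the legacy
# "RedirectOutput" flag appears in "debugOptions", returns None when neither is
# given, and raises if the property is explicitly false while the flag is present.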
python_args = request("pythonArgs", json.array(unicode, vectorize=True, size=(0,)))
cmdline = [compat.filename(sys.executable)] + python_args
if not request("noDebug", json.default(False)):
port = request("port", int)
cmdline += [
compat.filename(os.path.dirname(debugpy.__file__)),
"--connect",
launcher.adapter_host + ":" + str(port),
]
if not request("subProcess", True):
cmdline += ["--configure-subProcess", "False"]
qt_mode = request(
"qt",
json.enum(
"auto", "none", "pyside", "pyside2", "pyqt4", "pyqt5", optional=True
),
)
cmdline += ["--configure-qt", qt_mode]
adapter_access_token = request("adapterAccessToken", unicode, optional=True)
if adapter_access_token != ():
cmdline += ["--adapter-access-token", compat.filename(adapter_access_token)]
debugpy_args = request("debugpyArgs", json.array(unicode))
cmdline += debugpy_args
# Further arguments can come via two channels: the launcher's own command line, or
# "args" in the request; effective arguments are concatenation of these two in order.
# Arguments for debugpy (such as -m) always come via CLI, but those specified by the
# user via "args" are passed differently by the adapter depending on "argsExpansion".
cmdline += sys.argv[1:]
cmdline += request("args", json.array(unicode))
process_name = request("processName", compat.filename(sys.executable))
env = os.environ.copy()
env_changes = request("env", json.object(unicode))
if sys.platform == "win32":
# Environment variables are case-insensitive on Win32, so we need to normalize
# both dicts to make sure that env vars specified in the debug configuration
# overwrite the global env vars correctly. If debug config has entries that
# differ in case only, that's an error.
env = {k.upper(): v for k, v in os.environ.items()}
n = len(env_changes)
env_changes = {k.upper(): v for k, v in env_changes.items()}
if len(env_changes) != n:
raise request.isnt_valid('Duplicate entries in "env"')
if "DEBUGPY_TEST" in env:
# If we're running as part of a debugpy test, make sure that codecov is not
# applied to the debuggee, since it will conflict with pydevd.
env.pop("COV_CORE_SOURCE", None)
env.update(env_changes)
if request("gevent", False):
env["GEVENT_SUPPORT"] = "True"
console = request(
"console",
json.enum(
"internalConsole", "integratedTerminal", "externalTerminal", optional=True
),
)
redirect_output = property_or_debug_option("redirectOutput", "RedirectOutput")
if redirect_output is None:
# If neither the property nor the option were specified explicitly, choose
# the default depending on console type - "internalConsole" needs it to
# provide any output at all, but it's unnecessary for the terminals.
redirect_output = console == "internalConsole"
if redirect_output:
# sys.stdout buffering must be disabled - otherwise we won't see the output
# at all until the buffer fills up.
env["PYTHONUNBUFFERED"] = "1"
# Force UTF-8 output to minimize data loss due to re-encoding.
env["PYTHONIOENCODING"] = "utf-8"
if property_or_debug_option("waitOnNormalExit", "WaitOnNormalExit"):
if console == "internalConsole":
raise request.isnt_valid(
'"waitOnNormalExit" is not supported for "console":"internalConsole"'
)
debuggee.wait_on_exit_predicates.append(lambda code: code == 0)
if property_or_debug_option("waitOnAbnormalExit", "WaitOnAbnormalExit"):
if console == "internalConsole":
raise request.isnt_valid(
'"waitOnAbnormalExit" is not supported for "console":"internalConsole"'
)
debuggee.wait_on_exit_predicates.append(lambda code: code != 0)
if sys.version_info < (3,):
# Popen() expects command line and environment to be bytes, not Unicode.
# Assume that values are filenames - it's usually either that, or numbers -
# but don't allow encoding to fail if we guessed wrong.
encode = functools.partial(compat.filename_bytes, errors="replace")
cmdline = [encode(s) for s in cmdline]
env = {encode(k): encode(v) for k, v in env.items()}
debuggee.spawn(process_name, cmdline, env, redirect_output)
return {}
def terminate_request(request):
del debuggee.wait_on_exit_predicates[:]
request.respond({})
debuggee.kill()
def disconnect():
del debuggee.wait_on_exit_predicates[:]
debuggee.kill()
|
py | b40a5de86c6cf01be63c019deef704b7a4e6b9dd | #!/bin/usr/env python
#####################################################################
# - FILE: translate_m8_positions.py
# - DESC: Translate .m8 alignments from positions in MMseqs models ("p") to the corresponding
# positions in the HMMER models, based on HMMER consensus sequence ("s").
#
#####################################################################
import os,sys
import numpy as np
debug = True
# debug print
def dprint(*args):
if (debug):
print(args)
return
# load file with identity alignment b/t mmseqs and hmm models.
def load_identity(my_path, my_data):
my_fp = open(my_path, "r")
line = my_fp.readline()
while line:
fields = line.split()
p_name = fields[0]
# cigar = fields[len(fields)-1]
# parse data entry
my_data[p_name] = {}
my_data[p_name]["general"] = fields
p_name = fields[0]
s_name = fields[1]
p_len = int(fields[2])
p_beg = int(fields[3])
p_end = int(fields[4])
p_len = int(fields[2])
s_beg = int(fields[6])
s_end = int(fields[7])
alnlen = int(fields[8])
cigar = fields[9]
# convert cigar alignment to array of p_positions and s_positions
p_pos, s_pos = cigar_to_array(cigar, p_beg, p_end, s_beg, s_end)
# my_data[p_name]["states"] = states
# my_data[p_name]["runs"] = runs
my_data[p_name]["p_pos"] = p_pos
my_data[p_name]["s_pos"] = s_pos
line = my_fp.readline()
my_fp.close()
return
# convert cigar alignment to array of index p_positions and s_positions
def cigar_to_array(cigar, p_beg, p_end, s_beg, s_end):
# append to begining of list positions before the alignment
p_pos = [0, p_beg]
s_pos = [0, s_beg]
# break up cigar alignment...
# ...into lists of states
states = cigar
# remove all digits
for i in range(10):
states = states.replace(str(i), " ")
states = states.split()
# ...into list of runs
runs = cigar
# remove all state letters (M,I,D)
runs = runs.replace("M"," ").replace("I"," ").replace("D"," ")
runs = runs.split()
runs = [int(x) for x in runs]
# ...into list of positions
p_cnt = p_beg
s_cnt = s_beg
for i in range(len(runs)):
if (states[i] == "M"):
p_cnt += runs[i]
s_cnt += runs[i]
if (states[i] == "I"):
p_cnt += runs[i]
s_cnt += 0
if (states[i] == "D"):
p_cnt += 0
s_cnt += runs[i]
p_pos.append(p_cnt)
s_pos.append(s_cnt)
# append to end of list positions after the alignment
p_pos.append(p_end)
s_pos.append(s_end)
# print("P:({},{}), S:({},{})".format(p_beg,p_end,s_beg,s_end))
# print("CIGAR:", cigar)
# print("RUNS:", len(runs), runs)
# print("STATES:", len(states), states)
# print("P_POS:", len(p_pos), p_pos)
# print("S_POS:", len(s_pos), s_pos)
return p_pos, s_pos
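# Added worked example (hypothetical values, not from the original file): for
# cigar = "3M2I4M" with p_beg=5, p_end=14, s_beg=7, s_end=14 the walk above gives
#   3M -> p 5->8,  s 7->10
#   2I -> p 8->10, s stays 10   (insertions advance only the mmseqs profile index)
#   4M -> p 10->14, s 10->14
# so p_pos = [0, 5, 8, 10, 14, 14] and s_pos = [0, 7, 10, 10, 14, 14], where the
# leading 0/beg entries and trailing end entries come from the padding above.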
# translate position of points b/t mmseqs model position -> hmmer model position
def translate_seed_range(fields, id_fields, my_stats):
# parse data from mmseqs alignments
p_name = fields[0]
p_alnbeg = int(fields[6])
p_alnend = int(fields[7])
# print("P_NAME:", p_name)
# print("ID_FIELDS:", id_fields)
# parse data from model identity alignments
p_len = int(id_fields["general"][2])
p_beg = int(id_fields["general"][3])
p_end = int(id_fields["general"][4])
s_len = int(id_fields["general"][5])
s_beg = int(id_fields["general"][6])
s_end = int(id_fields["general"][7])
p_pos = id_fields["p_pos"]
s_pos = id_fields["s_pos"]
# output
s_alnbeg = -1
s_alnend = -1
# translate each position for p_pos -> s_pos
my_translate = translate_seed_position_1
# print("P_POS:", len(p_pos), p_pos)
# print("S_POS:", len(s_pos), s_pos)
s_alnbeg, stat_beg = my_translate(p_alnbeg, s_pos, s_beg, s_end, s_len, p_pos, p_beg, p_end, p_len, my_stats)
s_alnend, stat_end = my_translate(p_alnend, s_pos, s_beg, s_end, s_len, p_pos, p_beg, p_end, p_len, my_stats)
my_stats[(stat_beg,stat_end)] += 1
# print("P:({},{}) -> S:({},{})".format(p_alnbeg, p_alnend, s_alnbeg, s_alnend))
# print("STATS:", stat_beg, stat_end)
return s_alnbeg, s_alnend
# Method #1: translate position of points b/t mmseqs model position -> hmmer model position
# - This method handles positions outside of the alignment by approximating the location in the
# hmmer model by the percentage of the outer nodes outside the alignment in the mmseqs model.
def translate_seed_position_1(p_alnpt, s_pos, s_beg, s_end, s_len, p_pos, p_beg, p_end, p_len, my_stats):
# output
s_alnpt = -1
# if alignment is before identity alignment
if ( p_alnpt < p_beg ):
my_stat = "before_aln"
p_perc = 0
if ( p_beg > 1 ):
p_perc = float(p_beg - p_alnpt) / float(p_beg)
s_alnpt = int(s_beg - ( p_perc * float(s_beg) ) )
s_alnpt = max(0, s_alnpt)
pass
# if alignment is inside identity alignment
elif ( p_alnpt <= p_end ):
my_stat = "inside_aln"
for i in range(1,len(p_pos)):
if ( p_pos[i] >= p_alnpt ):
if ( s_pos[i] != s_pos[i-1] ):
s_alnpt = s_pos[i-1] + (p_alnpt - p_pos[i-1])
else:
s_alnpt = s_pos[i-1]
break
pass
# if alignment is after identity alignment
elif ( p_alnpt > p_end ):
my_stat = "after_aln"
p_perc = 0
if ( p_len - p_end != 0 ):
p_perc = float(p_alnpt - p_end) / float(p_len - p_end)
s_alnpt = int( s_end + ( p_perc * float(s_len - s_end) ) )
s_alnpt = min(s_len-1, s_alnpt)
pass
# shouldn't get here
else:
pass
return s_alnpt, my_stat
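# Added worked example (hypothetical numbers): with p_beg=10 and s_beg=20, a seed
# point at p_alnpt=2 lies 80% of the way before the model-to-model alignment, so
# it maps to s_alnpt = int(20 - 0.8 * 20) = 4; points inside the alignment are
# looked up directly via p_pos/s_pos, and points past p_end are scaled into the
# (s_end, s_len) tail in the same proportional way.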
# Method #2: translate position of points b/t mmseqs model position -> hmmer model position
# - This method handles positions outside of the alignment by truncating the results
# positions within the model alignment, then truncating the target sequence accordingly
# (required that mmseqs search for query/target include full traceback alignment "-a")
def translate_seed_position_2(p_alnpt, s_pos, s_beg, s_end, s_len, p_pos, p_beg, p_end, p_len, my_stats):
s_alnpt = -1
# if alignment is before identity alignment
if ( p_alnpt < p_beg ):
my_stat = "before_aln"
p_perc = 0
if ( p_beg > 1 ):
p_perc = float(p_beg - p_alnpt) / float(p_beg - 1)
s_alnpt = s_beg - int( p_perc * float(s_beg) )
pass
# if alignment is inside identity alignment
elif ( p_alnpt <= p_end ):
my_stat = "inside_aln"
for i in range(1,len(p_pos)):
if ( p_pos[i] >= p_alnpt ):
if ( s_pos[i] != s_pos[i-1] ):
s_alnpt = s_pos[i-1] + (p_alnpt - p_pos[i-1])
else:
s_alnpt = s_pos[i-1]
break
pass
# if alignment is after identity alignment
elif ( p_alnpt > p_end ):
my_stat = "after_aln"
p_perc = 0
if ( p_len - p_end != 0 ):
p_perc = float(p_alnpt - p_end) / float(p_len - p_end)
s_alnpt = s_end + int( p_perc * float(s_len - s_end) )
pass
# shouldn't get here
else:
pass
return s_alnpt, my_stat
#####################################################################
## MAIN ###########################################################
#####################################################################
my_args = {}
my_fps = {}
my_data = {}
my_stats = {}
my_data["res_m8"] = {}
my_data["m2m_m8"] = {}
my_data["out_m8"] = {}
req_args = 3
if len(sys.argv) < req_args + 1:
print("# ERROR: incorrect number of args.")
print("# ./ <i:mmseqs_model_results_m8> <i:model2model_m8> <o:hmmer_model_results_m8>")
exit(-1)
# load args
my_args["res_m8"] = sys.argv[1]
my_args["m2m_m8"] = sys.argv[2]
my_args["out_m8"] = sys.argv[3]
# load identity data
load_identity(my_args["m2m_m8"], my_data["m2m_m8"])
# translate alignment seeds
my_fps["res_m8"] = open(my_args["res_m8"], "r")
my_fps["out_m8"] = open(my_args["out_m8"], "w")
for pos_beg in ["before_aln", "inside_aln", "after_aln"]:
for pos_end in ["before_aln", "inside_aln", "after_aln"]:
my_stats[(pos_beg,pos_end)] = 0
line = my_fps["res_m8"].readline()
while line:
try:
# get input data from mmseqs result entry
fields = line.split()
p_name = fields[0]
# get input data from model identity alignments
id_fields = my_data["m2m_m8"][p_name]
# translate viterbi seed positions to hmm model positions
s_alnbeg, s_alnend = translate_seed_range(fields, id_fields, my_stats)
# replace mmseqs profile indexes with hmmer profile indexes
# print("FIELDS:", len(fields), fields)
entry = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
fields[0], fields[1], fields[2], fields[3], fields[4], fields[5],
int(s_alnbeg), int(s_alnend), fields[8], fields[9], fields[10], fields[11] )
my_fps["out_m8"].write( "{}\n".format(entry) )
# catch keyerror
except:
e = sys.exc_info()[0]
print("ERROR: %s" % (e) )
line = my_fps["res_m8"].readline()
my_fps["res_m8"].close()
my_fps["out_m8"].close()
print("# completed successfully.")
pos = ["before_aln", "inside_aln", "after_aln"]
for pos_beg in pos:
for pos_end in pos:
print("# stats({},{}): {}".format(pos_beg, pos_end, my_stats[(pos_beg,pos_end)]) )
|
py | b40a60ad479300483519a623541421655730cd2a | from util import *
import re
send_gdb('handle SIGKILL stop')
send_gdb('when')
expect_gdb(re.compile(r'Current event: (\d+)'))
event = eval(last_match().group(1));
send_gdb('when-ticks')
expect_gdb(re.compile(r'Current tick: (\d+)'))
ticks = eval(last_match().group(1));
if ticks != 0:
failed('ERROR in first "when-ticks"')
send_gdb('c')
send_gdb('when-ticks')
expect_gdb(re.compile(r'Current tick: (\d+)'))
ticks2 = eval(last_match().group(1));
if ticks2 < 99999:
failed('ERROR in second "when-ticks"')
send_gdb("seek-ticks %d" % ticks)
expect_gdb("Program stopped.")
send_gdb('when-ticks')
expect_gdb(re.compile(r'Current tick: (\d+)'))
ticks3 = eval(last_match().group(1));
if ticks3 != ticks:
failed('ERROR: Failed to seek back to ticks')
send_gdb('when')
expect_gdb(re.compile(r'Current event: (\d+)'))
event2 = eval(last_match().group(1));
if event2 != event:
failed('ERROR: Failed to seek back to ticks')
ok()
|
py | b40a614c03c541c5343c8d4509a0ad737a39d5ed | """
Contains helper functions and utilities for SAGA observing runs
"""
|
py | b40a61f6a57f75e55c7a1f01f104e199c71f32c9 | from app import db, login
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import random
import string
import json
class Client(db.Model):
"""Defines a client"""
id = db.Column(db.Integer, primary_key=True, nullable=False)
uid = db.Column(db.String(6), unique=True, nullable=False)
name = db.Column(db.String(32), unique=True, nullable=False)
description = db.Column(db.String(128))
mail_to = db.Column(db.String(256), nullable=True)
xss = db.relationship('XSS', backref='client', lazy='dynamic')
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def to_dict_clients(self):
"""Returns a dict containing client's data to be displayed in a list of clients"""
data_num = 0
xss = XSS.query.filter_by(client_id=self.id).all()
for hit in xss:
data_num += len(json.loads(hit.data))
data = {
'owner_id': self.owner_id,
'id': self.id,
'name': self.name,
'reflected': XSS.query.filter_by(client_id=self.id).filter_by(xss_type='reflected').count(),
'stored': XSS.query.filter_by(client_id=self.id).filter_by(xss_type='stored').count(),
'data': data_num
}
return data
def to_dict_client(self):
"""Returns a dict containing client's data"""
owner = None
if self.owner_id != None:
owner = User.query.filter_by(id=self.owner_id).first().username
if owner == None:
owner = 'Nobody'
data = {
'owner': owner,
'id': self.id,
'name': self.name,
'description': self.description,
'mail_to': self.mail_to
}
return data
def gen_uid(self):
"""Generates a UID"""
characters = string.ascii_letters + string.digits
new_uid = ''.join(random.choice(characters) for i in range(6))
while(Client.query.filter_by(uid=new_uid).first() != None):
new_uid = ''.join(random.choice(characters) for i in range(6))
self.uid = new_uid
class XSS(db.Model):
"""Defines an XSS"""
id = db.Column(db.Integer, primary_key=True, nullable=False)
headers = db.Column(db.Text)
ip_addr = db.Column(db.String(15))
data = db.Column(db.Text)
timestamp = db.Column(db.Integer)
client_id = db.Column(db.Integer, db.ForeignKey('client.id'))
xss_type = db.Column(db.String(9))
def to_dict(self):
"""Returns full representation of XSS"""
data = {
'id': self.id,
'headers': json.loads(self.headers),
'ip_addr': self.ip_addr,
'data': json.loads(self.data) if self.data != None else self.data,
'timestamp': self.timestamp,
}
if 'fingerprint' in data['data'].keys():
data['data']['fingerprint'] = ''
if 'dom' in data['data'].keys():
data['data']['dom'] = ''
if 'screenshot' in data['data'].keys():
data['data']['screenshot'] = ''
return data
def to_dict_short(self):
"""Returns an abridged representation of XSS"""
data = {
'id': self.id,
'ip_addr': self.ip_addr,
'timestamp': self.timestamp,
}
return data
class User(UserMixin, db.Model):
"""Defines a user"""
id = db.Column(db.Integer, primary_key=True, nullable=False)
username = db.Column(db.String(128), unique=True, nullable=False)
password_hash = db.Column(db.String(95), nullable=False)
first_login = db.Column(db.Boolean, nullable=False, default=True)
is_admin = db.Column(db.Boolean, nullable=False, default=False)
client = db.relationship('Client', backref='owner', lazy='dynamic')
def set_password(self, password):
"""Sets user's password"""
self.password_hash = generate_password_hash(password)
def check_password(self, password):
"""Validates user's password"""
return check_password_hash(self.password_hash, password)
def generate_password(self):
"""Generates a new password"""
characters = string.ascii_letters + string.digits
return ''.join(random.choice(characters) for i in range(12))
def to_dict(self):
"""Returns a representation of the user"""
data = {
'id': self.id,
'username': self.username,
'first_login': self.first_login,
'is_admin': self.is_admin
}
return data
class Settings(db.Model):
"""Holds app settings"""
id = db.Column(db.Integer, primary_key=True, nullable=False)
smtp_host = db.Column(db.String(256), nullable=True)
smtp_port = db.Column(db.Integer, nullable=True)
starttls = db.Column(db.Boolean, default=False, nullable=True)
ssl_tls = db.Column(db.Boolean, default=False, nullable=True)
mail_from = db.Column(db.String(256), nullable=True)
smtp_user = db.Column(db.String(128), nullable=True)
smtp_pass = db.Column(db.String(128), nullable=True)
smtp_status = db.Column(db.Boolean, nullable=True)
def to_dict(self):
"""Returns the settings"""
data = {
'smtp_host': self.smtp_host,
'smtp_port': self.smtp_port,
'starttls': self.starttls,
'ssl_tls': self.ssl_tls,
'mail_from': self.mail_from,
'smtp_user': self.smtp_user,
'smtp_status': self.smtp_status
}
return data
@login.user_loader
def load_user(id):
"""Returns the user identifier for the session mechanism"""
return User.query.get(id)
def init_app(app):
"""Creates the admin user and the settings"""
with app.app_context():
if db.session.query(User).count() != 0:
print('[-] User creation not needed')
else:
user = User(username='admin', is_admin=1)
user.set_password('xss')
db.session.add(user)
db.session.commit()
print('[+] Initial user created')
if db.session.query(Settings).count() != 0:
print('[-] Settings initialization not needed')
else:
settings = Settings()
db.session.add(settings)
db.session.commit()
print('[+] Settings initialization successful')
|
py | b40a62f76a6db9cbe1d9bc3c084c13dfb9099cb2 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .container_service import *
from .get_container_service import *
from ._inputs import *
from . import outputs
|
py | b40a632091533d8b9f5fca58d57e97fd3959b1b1 | import os
import sys
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../../')
from experiments.experiment import *
from experiments.options import *
class VerifyResNetHypotheses(Experiment):
"""
Explore number of epochs for models.
"""
def __init__(self, args=None):
"""
Constructor, also sets options.
"""
super(VerifyResNetHypotheses, self).__init__(args)
self.class_latent_space_size = 10
""" (int) Latent space size. """
self.data_latent_space_size = 10
""" (int) Latent space size. """
decoder_file = 'Manifolds/EMNIST/vaegan2_%d_%d_abs_30_64_64_%g_%g_0_manual/decoder'
encoder_file = 'Manifolds/EMNIST/vaegan2_%d_%d_abs_30_64_64_%g_%g_0_manual/encoder'
self.betas = [
2.25,
5,
2.25,
2.25,
2.25,
2.25,
2.25,
3,
2.25,
2.25,
3
]
""" ([float]) Betas. """
self.gammas = [1]*11
""" ([float]) Gammas. """
self.decoder_files = []
""" ([str]) Decoder files for class manifolds. """
for label in range(self.labels):
self.decoder_files.append(
paths.state_file(decoder_file % (self.class_latent_space_size, label, self.betas[label], self.gammas[label]), experiment=''))
self.decoder_file = paths.state_file(decoder_file % (self.data_latent_space_size, -1, self.betas[-1], self.gammas[-1]), experiment='')
""" (str) Decoder file for data manifold. """
self.encoder_files = []
""" ([str]) Decoder files for class manifolds. """
for label in range(self.labels):
self.encoder_files.append(
paths.state_file(encoder_file % (self.class_latent_space_size, label, self.betas[label], self.gammas[label]), experiment=''))
self.encoder_file = paths.state_file(encoder_file % (self.data_latent_space_size, -1, self.betas[-1], self.gammas[-1]), experiment='')
""" (str) Decoder file for data manifold. """
self.manifold_directory = 'Manifolds/EMNIST/vaegan2'
""" (str) Manifold directory. """
self.label_index = 0
""" (int) Label index. """
self.train_images_file = paths.emnist_train_images_file()
""" (str) Train images file. """
self.test_images_file = paths.emnist_test_images_file()
""" (str) Test images file. """
self.train_codes_file = paths.emnist_train_labels_file()
""" (str) Train codes file. """
self.test_codes_file = paths.emnist_test_labels_file()
""" (str) Test codes file. """
self.max_iterations = 40
""" (int) Global number of iterations. """
self.off_training_epsilon = 0.3
""" (float) Epsilon for training. """
self.on_training_epsilon = 0.3
""" (float) Epsilon for training. """
self.on_data_training_epsilon = 0.1
""" (float) Epsilon for training. """
self.stn_N_theta = 6
self.stn_translation = '-0.15,0.15'
self.stn_shear = '-0.2,0.2'
self.stn_scale = '0.85,1.15'
self.stn_rotation = '%g,%g' % (-math.pi / 10, math.pi / 10)
assert self.stn_N_theta is not None
assert self.stn_translation is not None
assert self.stn_shear is not None
assert self.stn_scale is not None
assert self.stn_rotation is not None
self.max_iterations = 40
""" (int) Global number of iterations. """
self.off_attack_epsilons = [0.3]
""" ([flaot]) Epsilons for attacking. """
self.on_attack_epsilons = [0.3]
""" ([flaot]) Epsilons for attacking. """
self.on_data_attack_epsilons = [0.1]
""" ([float]) Epsilons for attacking. """
self.off_training_epsilon = 0.3
""" (float) Epsilon for training. """
self.on_training_epsilon = 0.3
""" (float) Epsilon for training. """
self.on_data_training_epsilon = 0.1
""" (float) Epsilon for training. """
assert self.args.training_sizes is not None
training_sizes = list(map(int, self.args.training_sizes.split(',')))
self.training_options = [TrainingOptions(training_size, 20) for training_size in training_sizes]
""" ([TrainingOptions]) Training options. """
self.off_attack_options = []
""" ([OffAttackOptions]) Attack options. """
self.off_training_options = OffAttackMadryLInfFullIterationOptions(self.off_training_epsilon, self.max_iterations)
""" (OffAttackOptions) Taining options. """
self.learned_on_class_attack_options = []
""" ([LearnedOnClassAttackOptions]) Attack options. """
self.learned_on_class_training_options = LearnedOnClassAttackMadryLInfFullIterationOptions(self.on_training_epsilon, self.max_iterations)
""" (LearnedOnClassAttackOptions) Training options. """
self.stn_augmentation_options = STNAugmentationLInfOptions(self.on_training_epsilon, 1, self.stn_N_theta, self.stn_translation, self.stn_shear, self.stn_scale, self.stn_rotation)
""" ([STNAugmentationOptions]) Augmentation options. """
self.stn_attack_options = []
""" ([STNAttackOptions]) Attack options. """
self.stn_training_options = STNAttackMadryLInfFullIterationOptions(self.on_training_epsilon, self.max_iterations, self.stn_N_theta, self.stn_translation, self.stn_shear, self.stn_scale, self.stn_rotation)
""" (STNAttackOptions) Training options. """
self.learned_on_data_attack_options = []
""" ([LearnedOnClassAttackOptions]) Attack options. """
self.learned_on_data_training_options = LearnedOnDataAttackMadryLInfFullIterationOptions(self.on_data_training_epsilon, self.max_iterations)
""" (LearnedOnClassAttackOptions) Training options. """
for epsilon in self.off_attack_epsilons:
self.off_attack_options += [
OffAttackMadryLInfOptions(epsilon, self.max_iterations),
]
if self.args.strong:
self.off_attack_options.append(OffAttackMadryLInfOptions(epsilon, 2*self.max_iterations))
for epsilon in self.on_attack_epsilons:
self.on_attack_options += [
OnAttackMadryLInfOptions(epsilon, self.max_iterations),
]
if self.args.strong:
self.on_attack_options.append(OnAttackMadryLInfOptions(epsilon, 2*self.max_iterations))
self.learned_on_class_attack_options += [
LearnedOnClassAttackMadryLInfOptions(epsilon, self.max_iterations),
]
if self.args.strong:
self.learned_on_class_attack_options.append(LearnedOnClassAttackMadryLInfOptions(epsilon, 2*self.max_iterations))
self.stn_attack_options += [
STNAttackMadryLInfOptions(epsilon, self.max_iterations, self.stn_N_theta, self.stn_translation, self.stn_shear, self.stn_scale, self.stn_rotation),
]
if self.args.strong:
self.stn_attack_options.append(STNAttackMadryLInfOptions(epsilon, 2*self.max_iterations, self.stn_N_theta, self.stn_translation, self.stn_shear, self.stn_scale, self.stn_rotation))
for epsilon in self.on_data_attack_epsilons:
self.learned_on_data_attack_options += [
LearnedOnDataAttackMadryLInfOptions(epsilon, self.max_iterations),
]
if self.args.strong:
self.learned_on_data_attack_options.append(LearnedOnDataAttackMadryLInfOptions(epsilon, 2*self.max_iterations))
def get_parser(self):
"""
Get parser.
:return: parser
:rtype: argparse.ArgumentParser
"""
parser = super(VerifyResNetHypotheses, self).get_parser()
parser.add_argument('-transfer', default=False, action='store_true', help='Transfer attacks.')
parser.add_argument('-learned_on_data', default=False, action='store_true', help='Learned on data attacks.')
parser.add_argument('-transfer_model_directory', default='', help='Transfer model directory.', type=str)
parser.add_argument('-strong', default='', help='Strong attack.', type=str)
return parser
def network_parameters(self):
"""
Get network parameters.
"""
if self.network_architecture is None:
self.network_architecture = [
'-network_architecture=resnet',
# '-network_no_batch_normalization',
# '-network_dropout',
# '-network_activation=relu',
'-network_channels=64',
'-network_units=2,2,2', # blocks for resnet14
]
return self.network_architecture
def experiment(self):
"""
Experiment.
"""
return 'VerifyResNetHypotheses/EMNIST/'
def run(self):
"""
Run.
"""
self.compute_class_theta()
if self.args.learned_on_data:
self.compute_data_theta()
models = []
for m in range(self.args.max_models - self.args.start_model):
for t in range(len(self.training_options)):
models = []
models.append(self.train_normal(t))
models.append(self.train_off_manifold_adversarial(t, self.off_training_options))
models.append(self.train_regular_augmented(t, self.stn_augmentation_options))
models.append(self.train_adversarial_augmented(t, self.stn_training_options))
models.append(self.train_learned_on_class_manifold_adversarial(t, self.learned_on_class_training_options))
if self.args.learned_on_data:
models.append(self.train_learned_on_data_manifold_adversarial(t, self.learned_on_data_training_options))
for model in models:
for a in range(len(self.off_attack_options)):
self.attack_off_manifold(model, t, a)
for model in models:
for a in range(len(self.learned_on_class_attack_options)):
self.attack_learned_on_class_manifold(model, t, a)
for model in models:
for a in range(len(self.learned_on_class_attack_options)):
self.attack_learned_on_class_manifold(model, t, a)
if self.args.learned_on_data:
for model in models:
for a in range(len(self.learned_on_data_attack_options)):
self.attack_learned_on_data_manifold(model, t, a)
self.training_options[t].model += 1
if self.args.transfer:
assert self.args.transfer_model_directory
for t in range(len(self.training_options)):
self.training_options[t].model = self.args.start_model
for m in range(self.args.max_models - self.args.start_model):
for t in range(len(self.training_options)):
models = ['normal']
models.append(self.model_name('off_manifold_adversarial', self.training_options[t], self.off_training_options))
models.append(self.model_name('regular_augmented', self.training_options[t], self.stn_augmentation_options))
models.append(self.model_name('adversarial_augmented', self.training_options[t], self.stn_training_options))
models.append(self.model_name('learned_on_class_manifold_adversarial', self.training_options[t], self.learned_on_class_training_options))
if self.args.transfer:
for model in models:
for a in range(len(self.off_attack_options)):
self.attack_off_manifold_transfer(self.args.transfer_model_directory, model, t, a)
if self.args.transfer:
for model in models:
for a in range(len(self.learned_on_class_attack_options)):
self.attack_learned_on_class_manifold_transfer(self.args.transfer_model_directory, model, t, a)
if self.args.learned_on_data:
for model in models:
for a in range(len(self.learned_on_data_attack_options)):
self.attack_learned_on_data_manifold_transfer(self.args.transfer_model_directory, model, t, a)
self.training_options[t].model += 1
return models
def evaluate(self, models):
"""
Evaluation.
"""
utils.makedir(paths.experiment_dir('0_evaluation'))
for model in models:
keys = []
self.statistics[model] = numpy.mean(self.results[model], axis=1)
self.statistics[model + '_off'] = numpy.mean(self.results[model + '_off'], axis=1)
keys.append('off')
self.statistics[model + '_learned_on_class'] = numpy.mean(self.results[model + '_learned_on_class'], axis=1)
keys.append('learned_on_class')
if self.args.transfer:
ignore = 0
if model == 'normal':
ignore = 1
self.statistics[model + '_off_transfer'] = numpy.mean(self.results[model + '_off_transfer'][:, ignore:, :, :, :], axis=1)
keys.append('off_transfer')
self.statistics[model + '_learned_on_class_transfer'] = numpy.mean(self.results[model + '_learned_on_class_transfer'][:, ignore:, :, :, :], axis=1)
keys.append('learned_on_class_transfer')
if self.args.learned_on_data:
self.statistics[model + '_learned_on_data'] = numpy.mean(self.results[model + '_learned_on_data'], axis=1)
keys.append('learned_on_data')
self.plots(models, keys)
def plots(self, models, keys):
"""
Plots.
"""
labels = ['Normal', 'OffAdvTrain', 'STNAugm', 'STNAdvTrain', 'OnClassAdvTrain']
if self.args.learned_on_data:
labels += ['OnDataAdvTrain']
norms = [1, 2, float('inf')]
attacks = keys
for model in models:
log('[Experiment] %s' % model)
for attack in attacks:
n = 2
errors = self.results[model][:, :, 1].flatten()
distances = self.results['%s_%s' % (model, attack)][:, :, 0, n, 2].flatten()
successes = self.results['%s_%s' % (model, attack)][:, :, 0, 0, 0].flatten()
distance_slope, _, distance_correlation, distance_p_value, _ = scipy.stats.linregress(errors, distances)
success_slope, _, success_correlation, success_p_value, _ = scipy.stats.linregress(errors, successes)
log('[Experiment] %s: distance=%g (corr=%g, p=%g) success=%g (corr=%g, p=%g)' % (
attack, distance_slope, distance_correlation, distance_p_value, success_slope, success_correlation, success_p_value
))
# Standard error
x = numpy.stack([self.statistics[model][:, 0] for model in models], axis=1).T
y = numpy.stack([self.statistics[model][:, 1] for model in models], axis=1).T
plot_file = paths.image_file('0_evaluation/plot_error')
plot.line(plot_file, x, y, labels)
latex_file = paths.latex_file('0_evaluation/latex_error')
latex.line(latex_file, x, y, labels)
for attack in attacks:
# Success rate
x = numpy.stack([self.statistics[model][:, 0] for model in models], axis=1).T
y = numpy.stack([self.statistics['%s_%s' % (model, attack)][:, 0, 0, 0] for model in models], axis=1).T
plot_file = paths.image_file('0_evaluation/plot_%s_success_rate' % attack)
plot.line(plot_file, x, y, labels)
latex_file = paths.latex_file('0_evaluation/latex_%s_success_rate' % attack)
latex.line(latex_file, x, y, labels)
# L_inf Distances Off-Manifold
n = 2
x = numpy.stack([self.statistics[model][:, 0] for model in models], axis=1).T
y = numpy.stack([self.statistics['%s_%s' % (model, attack)][:, 0, n, 2] for model in models], axis=1).T
plot_file = paths.image_file('0_evaluation/plot_%s_distance_%g' % (attack, norms[n]))
plot.line(plot_file, x, y, labels)
latex_file = paths.latex_file('0_evaluation/latex_%s_distance_%g' % (attack, norms[n]))
latex.line(latex_file, x, y, labels)
# Error and success rate
c = numpy.stack([self.results[model][:, :, 0].flatten() for model in models], axis=1).T
x = numpy.stack([self.results[model][:, :, 1].flatten() for model in models], axis=1).T
y = numpy.stack([self.results['%s_%s' % (model, attack)][:, :, 0, 0, 0].flatten() for model in models], axis=1).T
plot_file = paths.image_file('0_evaluation/plot_%s_error_success_rate' % attack)
plot.scatter2(plot_file, x, y, labels)
latex_file = paths.latex_file('0_evaluation/latex_%s_error_success_rate' % attack)
latex.scatter2(latex_file, x, y, labels, c)
# Error and success rate as line
c = numpy.stack([self.statistics[model][:, 0] for model in models], axis=1).T
x = numpy.stack([self.statistics[model][:, 1] for model in models], axis=1).T
y = numpy.stack([self.statistics['%s_%s' % (model, attack)][:, 0, 0, 0].flatten() for model in models], axis=1).T
plot_file = paths.image_file('0_evaluation/plot_%s_error_success_rate_line' % attack)
plot.line(plot_file, x, y, labels)
latex_file = paths.latex_file('0_evaluation/latex_%s_error_success_rate_line' % attack)
latex.line(latex_file, x, y, labels, c)
def visualize(self, models):
"""
Visualize and detect.
"""
assert len(self.training_options) == 1
for t in range(len(self.training_options)):
self.training_options[t].model = 0
for m in [0, 1, 4]:
if self.args.learned_on_data:
success_file = self.visualize_learned_on_data_manifold(models[m])
success_file = self.visualize_learned_on_class_manifold(models[m])
self.visualize_off_manifold(models[m], success_file)
for m in [0, 1, 4]:
log('[Experiment] visualized %s' % models[m])
self.detect_off_manifold(models[0])
self.detect_learned_on_class_manifold(models[0])
if __name__ == '__main__':
program = VerifyResNetHypotheses()
program.main() |
py | b40a6647ecbde44b4e6cdadb285b63078dd18a65 | import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset, DataLoader
import paddle.distributed as dist
import os
import re
import argparse
import numpy as np
from sklearn.model_selection import train_test_split
from gensim.models.keyedvectors import KeyedVectors
# yapf: disable
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--data_dir", type=str, default="./sentence-polarity-dataset-v1/", help="Specify the data dir.")
parser.add_argument("--pretrained_word2vec_file", type=str, default="./sentence-polarity-dataset-v1/GoogleNews-vectors-negative300.bin", help="Specify the pretrained word2vec model path.")
parser.add_argument("--logging_step", type=int, default=10, help="The frequency, in number of steps, the training logs are printed. (default: %(default)d)")
parser.add_argument("--epochs", type=int, default=20, help="Total number of training epochs to perform.")
parser.add_argument("--batch_size", type=int, default=64, help="Batch size per GPU/CPU for training.")
parser.add_argument("--dropout", type=float, default=0.5, help="The dropout rate.")
parser.add_argument("--lr", type=float, default=0.001, help="The initial learning rate.")
parser.add_argument("--weight_decay", type=float, default=0.0001, help="The weight decay for optimizer.")
parser.add_argument("--seed", type=int, default=2020, help="Random seed.")
parser.add_argument("--max_seq_len", type=int, default=256, help='max grad norm')
parser.add_argument("--sent_embedding_dim", type=int, default=64, help="The size of sentence embedding.")
parser.add_argument("--num_classes", type=int, default=2, help="The num of classification classes.")
parser.add_argument("--device", type=str, default="gpu", help="Device for selecting for the training.")
args = parser.parse_args()
return args
# yapf: enable
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
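# Sketch (illustrative, not part of the original script): clean_str splits contractions and
# spaces out punctuation before lowercasing, e.g.
def _example_clean_str():
    return clean_str("Isn't this GREAT!")  # expected: "is n't this great !"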
def load_data_and_labels(positive_data_file, negative_data_file):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(
open(
positive_data_file, 'r', encoding='latin-1').readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(
open(
negative_data_file, 'r', encoding='latin-1').readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = list(map(lambda x: x.split(), x_text))
# Generate labels
positive_labels = [1 for _ in positive_examples]
negative_labels = [0 for _ in negative_examples]
y = np.array(positive_labels + negative_labels)
return [x_text, y]
class Word2VecBoWTextClassification(nn.Layer):
def __init__(self, word_embedding_dim, sent_embedding_dim, dropout,
num_classes):
super(Word2VecBoWTextClassification, self).__init__()
self._fc1 = nn.Linear(word_embedding_dim, sent_embedding_dim)
self._fc2 = nn.Linear(sent_embedding_dim, num_classes)
self._dropout = nn.Dropout(p=dropout)
def forward(self, inputs):
word_emb, seq_lens = inputs
# [batch_size, word_embedding_dim]
sent_emb = self.average_word_embedding(word_emb, seq_lens)
# [batch_size, sent_embedding_dim]
dense = self._fc1(sent_emb)
dense = self._dropout(dense)
# [batch_size, num_classes]
out = self._fc2(dense)
return out
def average_word_embedding(self, word_emb, seq_lens):
"""
Parameters:
word_emb: It is a Tensor with shape `[batch_size, max_seq_len, word_embedding_dim]`.
seq_lens: It is a Tensor with shape `[batch_size]`.
"""
seq_lens = paddle.unsqueeze(seq_lens, axis=-1)
seq_lens = paddle.cast(seq_lens, dtype=word_emb.dtype)
# [batch_size, word_embedding_dim]
sent_emb = paddle.sum(word_emb, axis=1)
# [batch_size, word_embedding_dim]
sent_emb = sent_emb / seq_lens
return sent_emb
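# Sketch (assumption, not part of the original script): bag-of-words averaging on a toy batch,
# exercising average_word_embedding defined above; dimensions are made up for illustration.
def _example_average_word_embedding():
    model = Word2VecBoWTextClassification(
        word_embedding_dim=4, sent_embedding_dim=2, dropout=0.0, num_classes=2)
    word_emb = paddle.ones([2, 3, 4])    # batch of 2 sentences, up to 3 words, word dim 4
    seq_lens = paddle.to_tensor([3, 2])  # true sequence lengths per sentence
    return model.average_word_embedding(word_emb, seq_lens)  # shape [2, 4]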
class SentencePolarityDatasetV1(Dataset):
def __init__(self, x, y, gensim_model, max_seq_len):
super(SentencePolarityDatasetV1, self).__init__()
self._text = list(zip(x, y))
self._gensim_model = gensim_model
self._vector_size = gensim_model.vector_size
self._max_seq_len = max_seq_len
self._data = self.convert_to_ids()
def convert_to_ids(self):
data = []
for sentence, label in self._text:
sentence = sentence[:self._max_seq_len]
ids = np.zeros([len(sentence), self._vector_size], dtype=np.float32)
for i, word in enumerate(sentence):
if word in self._gensim_model:
ids[i] = self._gensim_model[word]
else:
ids[i] = np.random.uniform(-0.25, 0.25, self._vector_size)
data.append([ids, label])
return data
def __getitem__(self, idx):
ids = np.copy(self._data[idx][0])
label = self._data[idx][1]
return (ids, label)
def __len__(self):
return len(self._data)
def generate_batch(batch):
batch_ids, batch_label = zip(*batch)
max_len = max([ids.shape[0] for ids in batch_ids])
new_batch_ids = np.zeros(
[len(batch_ids), max_len, batch_ids[0].shape[1]], dtype=np.float32)
new_batch_label = []
new_batch_seq_len = []
for i, (ids, label) in enumerate(zip(batch_ids, batch_label)):
seq_len = ids.shape[0]
new_batch_ids[i, :seq_len, :] = ids
new_batch_label.append(label)
new_batch_seq_len.append(seq_len)
return new_batch_ids, new_batch_label, new_batch_seq_len
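# Sketch (assumption, not part of the original script): how generate_batch zero-pads a toy
# batch of word-embedding matrices up to the longest sequence in the batch.
def _example_generate_batch():
    toy = [(np.ones([2, 4], dtype=np.float32), 1),
           (np.ones([3, 4], dtype=np.float32), 0)]
    ids, labels, seq_lens = generate_batch(toy)
    # ids.shape == (2, 3, 4); labels == [1, 0]; seq_lens == [2, 3]
    return ids, labels, seq_lens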
def train(args):
paddle.set_device(args.device)
if dist.get_world_size() > 1:
dist.init_parallel_env()
pos_file = os.path.join(args.data_dir, 'rt-polarity.pos')
neg_file = os.path.join(args.data_dir, 'rt-polarity.neg')
x_text, y = load_data_and_labels(pos_file, neg_file)
x_train, x_test, y_train, y_test = train_test_split(
x_text, y, test_size=0.1, random_state=args.seed)
#gensim_model = KeyedVectors.load_word2vec_format(args.pretrained_word2vec_file, binary=True, limit=300000)
gensim_model = KeyedVectors.load_word2vec_format(
args.pretrained_word2vec_file, binary=True)
print('\nLoaded word2vec from %s\n' % args.pretrained_word2vec_file)
train_dataset = SentencePolarityDatasetV1(x_train, y_train, gensim_model,
args.max_seq_len)
test_dataset = SentencePolarityDatasetV1(x_test, y_test, gensim_model,
args.max_seq_len)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
return_list=True,
shuffle=True,
collate_fn=lambda batch: generate_batch(batch))
test_loader = DataLoader(
test_dataset,
batch_size=args.batch_size,
return_list=True,
shuffle=False,
collate_fn=lambda batch: generate_batch(batch))
model = Word2VecBoWTextClassification(gensim_model.vector_size,
args.sent_embedding_dim, args.dropout,
args.num_classes)
if dist.get_world_size() > 1:
model = paddle.DataParallel(model)
model.train()
adam = paddle.optimizer.Adam(
parameters=model.parameters(),
learning_rate=args.lr,
weight_decay=args.weight_decay)
criterion = nn.CrossEntropyLoss()
for epoch in range(args.epochs):
print('Epoch %d/%d' % (epoch + 1, args.epochs))
for step, batch_data in enumerate(train_loader, start=1):
ids, label, seq_lens = batch_data
output = model((ids, seq_lens))
loss = criterion(output, label)
loss.backward()
adam.step()
adam.clear_grad()
if step % args.logging_step == 0:
print('step %d, loss %.4f' % (step, loss.numpy()[0]))
acc = test(model, test_loader)
print('\ntest acc %.4f\n' % acc)
@paddle.no_grad()
def test(model, test_loader):
correct = num = 0
model.eval()
for batch_data in test_loader:
ids, label, seq_lens = batch_data
# [batch_size, 2]
output = model((ids, seq_lens))
num += label.shape[0]
predict = paddle.argmax(output, axis=1)
label = paddle.cast(label, dtype=predict.dtype)
correct += paddle.sum(paddle.cast(
predict == label, dtype='int64')).numpy()[0]
model.train()
return correct * 1.0 / num
if __name__ == '__main__':
args = parse_args()
train(args)
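# Example invocation (illustrative; the script filename is an assumption, the flags and default
# paths come from parse_args above):
#   python train_word2vec_bow.py --data_dir ./sentence-polarity-dataset-v1/ \
#       --pretrained_word2vec_file ./sentence-polarity-dataset-v1/GoogleNews-vectors-negative300.bin \
#       --epochs 5 --batch_size 64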
|
py | b40a66796a9ee7f8926be66c4695bdf0a249a52c | from .core import getMutate |
py | b40a66c00125e64dd4faa946ee9eb30c49fb73a8 | # coding=utf-8
import os
import pytest
from cytoolz import (
dissoc,
)
from eth_keyfile.keyfile import (
get_default_work_factor_for_kdf,
)
from eth_keys import (
keys,
)
from eth_utils import (
is_checksum_address,
to_bytes,
to_hex,
to_int,
)
from hexbytes import (
HexBytes,
)
from eth_account import (
Account,
)
from eth_account.messages import (
defunct_hash_message,
)
# from https://github.com/ethereum/tests/blob/3930ca3a9a377107d5792b3e7202f79c688f1a67/BasicTests/txtest.json # noqa: 501
ETH_TEST_TRANSACTIONS = [
{
"chainId": None,
"key": "c85ef7d79691fe79573b1a7064c19c1a9819ebdbd1faaab1a8ec92344438aaf4",
"nonce": 0,
"gasPrice": 1000000000000,
"gas": 10000,
"to": "0x13978aee95f38490e9769C39B2773Ed763d9cd5F",
"value": 10000000000000000,
"data": "",
"unsigned": "eb8085e8d4a510008227109413978aee95f38490e9769c39b2773ed763d9cd5f872386f26fc1000080808080", # noqa: 501
"signed": "f86b8085e8d4a510008227109413978aee95f38490e9769c39b2773ed763d9cd5f872386f26fc10000801ba0eab47c1a49bf2fe5d40e01d313900e19ca485867d462fe06e139e3a536c6d4f4a014a569d327dcda4b29f74f93c0e9729d2f49ad726e703f9cd90dbb0fbf6649f1" # noqa: 501
},
{
"chainId": None,
"key": "c87f65ff3f271bf5dc8643484f66b200109caffe4bf98c4cb393dc35740b28c0",
"nonce": 0,
"gasPrice": 1000000000000,
"gas": 10000,
"to": "",
"value": 0,
"data": "6025515b525b600a37f260003556601b596020356000355760015b525b54602052f260255860005b525b54602052f2", # noqa: 501
"unsigned": "f83f8085e8d4a510008227108080af6025515b525b600a37f260003556601b596020356000355760015b525b54602052f260255860005b525b54602052f2808080", # noqa: 501
"signed": "f87f8085e8d4a510008227108080af6025515b525b600a37f260003556601b596020356000355760015b525b54602052f260255860005b525b54602052f21ba05afed0244d0da90b67cf8979b0f246432a5112c0d31e8d5eedd2bc17b171c694a0bb1035c834677c2e1185b8dc90ca6d1fa585ab3d7ef23707e1a497a98e752d1b" # noqa: 501
},
{
"chainId": None,
"key": "c85ef7d79691fe79573b1a7064c19c1a9819ebdbd1faaab1a8ec92344438aaf4",
"nonce": 0,
"gasPrice": 1000000000000,
"gas": 10000,
"to": HexBytes("0x13978aee95f38490e9769C39B2773Ed763d9cd5F"),
"value": 10000000000000000,
"data": "",
"unsigned": "eb8085e8d4a510008227109413978aee95f38490e9769c39b2773ed763d9cd5f872386f26fc1000080808080", # noqa: 501
"signed": "f86b8085e8d4a510008227109413978aee95f38490e9769c39b2773ed763d9cd5f872386f26fc10000801ba0eab47c1a49bf2fe5d40e01d313900e19ca485867d462fe06e139e3a536c6d4f4a014a569d327dcda4b29f74f93c0e9729d2f49ad726e703f9cd90dbb0fbf6649f1" # noqa: 501
},
]
PRIVATE_KEY_AS_BYTES = b'unicorns' * 4
PRIVATE_KEY_AS_HEXSTR = '0x756e69636f726e73756e69636f726e73756e69636f726e73756e69636f726e73'
PRIVATE_KEY_AS_INT = 0x756e69636f726e73756e69636f726e73756e69636f726e73756e69636f726e73
PRIVATE_KEY_AS_OBJ = keys.PrivateKey(PRIVATE_KEY_AS_BYTES)
ACCT_ADDRESS = '0xa79F6f349C853F9Ea0B29636779ae3Cb4E3BA729'
PRIVATE_KEY_AS_BYTES_ALT = b'rainbows' * 4
PRIVATE_KEY_AS_HEXSTR_ALT = '0x7261696e626f77737261696e626f77737261696e626f77737261696e626f7773'
PRIVATE_KEY_AS_INT_ALT = 0x7261696e626f77737261696e626f77737261696e626f77737261696e626f7773
PRIVATE_KEY_AS_OBJ_ALT = keys.PrivateKey(PRIVATE_KEY_AS_BYTES_ALT)
ACCT_ADDRESS_ALT = '0xafd7f0E16A1814B854b45f551AFD493BE5F039F9'
@pytest.fixture(params=[PRIVATE_KEY_AS_INT, PRIVATE_KEY_AS_HEXSTR, PRIVATE_KEY_AS_BYTES, PRIVATE_KEY_AS_OBJ]) # noqa: 501
def PRIVATE_KEY(request):
return request.param
@pytest.fixture(params=[PRIVATE_KEY_AS_INT_ALT, PRIVATE_KEY_AS_HEXSTR_ALT, PRIVATE_KEY_AS_BYTES_ALT, PRIVATE_KEY_AS_OBJ_ALT]) # noqa: 501
def PRIVATE_KEY_ALT(request):
return request.param
@pytest.fixture
def acct(request):
return Account
def test_eth_account_default_kdf(acct, monkeypatch):
assert os.getenv('ETH_ACCOUNT_KDF') is None
assert acct.default_kdf == 'scrypt'
monkeypatch.setenv('ETH_ACCOUNT_KDF', 'pbkdf2')
assert os.getenv('ETH_ACCOUNT_KDF') == 'pbkdf2'
import importlib
from eth_account import account
importlib.reload(account)
assert account.Account.default_kdf == 'pbkdf2'
def test_eth_account_create_variation(acct):
account1 = acct.create()
account2 = acct.create()
assert account1 != account2
def test_eth_account_equality(acct, PRIVATE_KEY):
acct1 = acct.privateKeyToAccount(PRIVATE_KEY)
acct2 = acct.privateKeyToAccount(PRIVATE_KEY)
assert acct1 == acct2
def test_eth_account_privateKeyToAccount_reproducible(acct, PRIVATE_KEY):
account1 = acct.privateKeyToAccount(PRIVATE_KEY)
account2 = acct.privateKeyToAccount(PRIVATE_KEY)
assert bytes(account1) == PRIVATE_KEY_AS_BYTES
assert bytes(account1) == bytes(account2)
assert isinstance(str(account1), str)
def test_eth_account_privateKeyToAccount_diverge(acct, PRIVATE_KEY, PRIVATE_KEY_ALT):
account1 = acct.privateKeyToAccount(PRIVATE_KEY)
account2 = acct.privateKeyToAccount(PRIVATE_KEY_ALT)
assert bytes(account2) == PRIVATE_KEY_AS_BYTES_ALT
assert bytes(account1) != bytes(account2)
def test_eth_account_privateKeyToAccount_seed_restrictions(acct):
with pytest.raises(ValueError):
acct.privateKeyToAccount(b'')
with pytest.raises(ValueError):
acct.privateKeyToAccount(b'\xff' * 31)
with pytest.raises(ValueError):
acct.privateKeyToAccount(b'\xff' * 33)
def test_eth_account_privateKeyToAccount_properties(acct, PRIVATE_KEY):
account = acct.privateKeyToAccount(PRIVATE_KEY)
assert callable(account.signHash)
assert callable(account.signTransaction)
assert is_checksum_address(account.address)
assert account.address == ACCT_ADDRESS
assert account.privateKey == PRIVATE_KEY_AS_OBJ
def test_eth_account_create_properties(acct):
account = acct.create()
assert callable(account.signHash)
assert callable(account.signTransaction)
assert is_checksum_address(account.address)
assert isinstance(account.privateKey, bytes) and len(account.privateKey) == 32
def test_eth_account_recover_transaction_example(acct):
raw_tx_hex = '0xf8640d843b9aca00830e57e0945b2063246f2191f18f2675cedb8b28102e957458018025a00c753084e5a8290219324c1a3a86d4064ded2d15979b1ea790734aaa2ceaafc1a0229ca4538106819fd3a5509dd383e8fe4b731c6870339556a5c06feb9cf330bb' # noqa: E501
from_account = acct.recoverTransaction(raw_tx_hex)
assert from_account == '0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4'
def test_eth_account_recover_transaction_with_literal(acct):
raw_tx = 0xf8640d843b9aca00830e57e0945b2063246f2191f18f2675cedb8b28102e957458018025a00c753084e5a8290219324c1a3a86d4064ded2d15979b1ea790734aaa2ceaafc1a0229ca4538106819fd3a5509dd383e8fe4b731c6870339556a5c06feb9cf330bb # noqa: E501
from_account = acct.recoverTransaction(raw_tx)
assert from_account == '0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4'
def test_eth_account_recover_message(acct):
v, r, s = (
28,
'0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3',
'0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce',
)
message = "I♥SF"
msghash = defunct_hash_message(text=message)
from_account = acct.recoverHash(msghash, vrs=(v, r, s))
assert from_account == '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
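# Sketch (illustrative, not one of the original test cases): the sign/recover round trip the
# surrounding tests exercise, using only APIs already imported in this module.
def _example_sign_and_recover():
    account = Account.privateKeyToAccount(PRIVATE_KEY_AS_HEXSTR)
    msghash = defunct_hash_message(text="hello")
    signed = account.signHash(msghash)
    return Account.recoverHash(msghash, signature=signed.signature) == account.address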
@pytest.mark.parametrize(
'signature_bytes',
[
# test signature bytes with standard v (0 in this case)
b'\x0cu0\x84\xe5\xa8)\x02\x192L\x1a:\x86\xd4\x06M\xed-\x15\x97\x9b\x1e\xa7\x90sJ\xaa,\xea\xaf\xc1"\x9c\xa4S\x81\x06\x81\x9f\xd3\xa5P\x9d\xd3\x83\xe8\xfeKs\x1chp3\x95V\xa5\xc0o\xeb\x9c\xf30\xbb\x00', # noqa: E501
# test signature bytes with chain-naive v (27 in this case)
b'\x0cu0\x84\xe5\xa8)\x02\x192L\x1a:\x86\xd4\x06M\xed-\x15\x97\x9b\x1e\xa7\x90sJ\xaa,\xea\xaf\xc1"\x9c\xa4S\x81\x06\x81\x9f\xd3\xa5P\x9d\xd3\x83\xe8\xfeKs\x1chp3\x95V\xa5\xc0o\xeb\x9c\xf30\xbb\x1b', # noqa: E501
],
ids=['test_sig_bytes_standard_v', 'test_sig_bytes_chain_naive_v']
)
def test_eth_account_recover_signature_bytes(acct, signature_bytes):
msg_hash = b'\xbb\r\x8a\xba\x9f\xf7\xa1<N,s{i\x81\x86r\x83{\xba\x9f\xe2\x1d\xaa\xdd\xb3\xd6\x01\xda\x00\xb7)\xa1' # noqa: E501
from_account = acct.recoverHash(msg_hash, signature=signature_bytes)
assert from_account == '0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4'
def test_eth_account_recover_vrs(acct):
v, r, s = (
27,
5634810156301565519126305729385531885322755941350706789683031279718535704513,
15655399131600894366408541311673616702363115109327707006109616887384920764603,
)
msg_hash = b'\xbb\r\x8a\xba\x9f\xf7\xa1<N,s{i\x81\x86r\x83{\xba\x9f\xe2\x1d\xaa\xdd\xb3\xd6\x01\xda\x00\xb7)\xa1' # noqa: E501
from_account = acct.recoverHash(msg_hash, vrs=(v, r, s))
assert from_account == '0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4'
from_account = acct.recoverHash(msg_hash, vrs=map(to_hex, (v, r, s)))
assert from_account == '0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4'
def test_eth_account_recover_vrs_standard_v(acct):
v, r, s = (
0,
5634810156301565519126305729385531885322755941350706789683031279718535704513,
15655399131600894366408541311673616702363115109327707006109616887384920764603,
)
msg_hash = b'\xbb\r\x8a\xba\x9f\xf7\xa1<N,s{i\x81\x86r\x83{\xba\x9f\xe2\x1d\xaa\xdd\xb3\xd6\x01\xda\x00\xb7)\xa1' # noqa: E501
from_account = acct.recoverHash(msg_hash, vrs=(v, r, s))
assert from_account == '0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4'
@pytest.mark.parametrize(
'message, expected',
[
(
'Message tö sign. Longer than hash!',
HexBytes('0x10c7cb57942998ab214c062e7a57220a174aacd80418cead9f90ec410eacada1'),
),
(
# Intentionally sneaky: message is a hexstr interpreted as text
'0x4d6573736167652074c3b6207369676e2e204c6f6e676572207468616e206861736821',
HexBytes('0x6192785e9ad00100e7332ff585824b65eafa30bc8f1265cf86b5368aa3ab5d56'),
),
(
'Hello World',
HexBytes('0xa1de988600a42c4b4ab089b619297c17d53cffae5d5120d82d8a92d0bb3b78f2'),
),
],
ids=['message_to_sign', 'hexstr_as_text', 'hello_world']
)
def test_eth_account_hash_message_text(message, expected):
assert defunct_hash_message(text=message) == expected
@pytest.mark.parametrize(
'message, expected',
[
(
'0x4d6573736167652074c3b6207369676e2e204c6f6e676572207468616e206861736821',
HexBytes('0x10c7cb57942998ab214c062e7a57220a174aacd80418cead9f90ec410eacada1'),
),
(
'0x29d9f7d6a1d1e62152f314f04e6bd4300ad56fd72102b6b83702869a089f470c',
HexBytes('0xe709159ef0e6323c705786fc50e47a8143812e9f82f429e585034777c7bf530b'),
),
],
ids=['hexbytes_1', 'hexbytes_2']
)
def test_eth_account_hash_message_hexstr(acct, message, expected):
assert defunct_hash_message(hexstr=message) == expected
@pytest.mark.parametrize(
'message, key, expected_bytes, expected_hash, v, r, s, signature',
(
(
'Some data',
'0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318',
b'Some data',
HexBytes('0x1da44b586eb0729ff70a73c326926f6ed5a25f5b056e7f47fbc6e58d86871655'),
28,
83713930994764734002432606962255364472443135907807238282514898577139886061053,
43435997768575461196683613590576722655951133545204789519877940758262837256233,
HexBytes('0xb91467e570a6466aa9e9876cbcd013baba02900b8979d43fe208a4a4f339f5fd6007e74cd82e037b800186422fc2da167c747ef045e5d18a5f5d4300f8e1a0291c'), # noqa: E501
),
(
'Some data',
keys.PrivateKey(HexBytes('0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318')), # noqa: E501
b'Some data',
HexBytes('0x1da44b586eb0729ff70a73c326926f6ed5a25f5b056e7f47fbc6e58d86871655'),
28,
83713930994764734002432606962255364472443135907807238282514898577139886061053,
43435997768575461196683613590576722655951133545204789519877940758262837256233,
HexBytes('0xb91467e570a6466aa9e9876cbcd013baba02900b8979d43fe208a4a4f339f5fd6007e74cd82e037b800186422fc2da167c747ef045e5d18a5f5d4300f8e1a0291c'), # noqa: E501
),
(
'10284',
'0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318',
b'10284',
HexBytes('0x0a162a5efbba02f38db3114531c8acba39fe676f09f7e471d93e8a06c471821c'),
27,
143748089818580655331728101695676826715814583506606354117109114714663470502,
227853308212209543997879651656855994238138056366857653269155208245074180053,
HexBytes('0x00515bc8fd32264e21ec0820e8c5123ed58c1195c9ea17cb018b1ad4073cc5a60080f5dcec397a5a8c523082bfa41771568903aa554ec06ba8475ca9050fb7d51b'), # noqa: E501
),
),
ids=['web3js_hex_str_example', 'web3js_eth_keys.datatypes.PrivateKey_example', '31byte_r_and_s'], # noqa: E501
)
def test_eth_account_sign(acct, message, key, expected_bytes, expected_hash, v, r, s, signature):
msghash = defunct_hash_message(text=message)
assert msghash == expected_hash
signed = acct.signHash(msghash, private_key=key)
assert signed.messageHash == expected_hash
assert signed.v == v
assert signed.r == r
assert signed.s == s
assert signed.signature == signature
account = acct.privateKeyToAccount(key)
msghash = defunct_hash_message(text=message)
assert account.signHash(msghash) == signed
@pytest.mark.parametrize(
'txn, private_key, expected_raw_tx, tx_hash, r, s, v',
(
(
{
'to': '0xF0109fC8DF283027b6285cc889F5aA624EaC1F55',
'value': 1000000000,
'gas': 2000000,
'gasPrice': 234567897654321,
'nonce': 0,
'chainId': 1
},
'0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318',
HexBytes('0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428'), # noqa: E501
HexBytes('0xd8f64a42b57be0d565f385378db2f6bf324ce14a594afc05de90436e9ce01f60'),
4487286261793418179817841024889747115779324305375823110249149479905075174044,
30785525769477805655994251009256770582792548537338581640010273753578382951464,
37,
),
(
{
'to': '0xF0109fC8DF283027b6285cc889F5aA624EaC1F55',
'value': 1000000000,
'gas': 2000000,
'gasPrice': 234567897654321,
'nonce': 0,
'chainId': 1
},
keys.PrivateKey(HexBytes('0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318')), # noqa: E501
HexBytes('0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428'), # noqa: E501
HexBytes('0xd8f64a42b57be0d565f385378db2f6bf324ce14a594afc05de90436e9ce01f60'),
4487286261793418179817841024889747115779324305375823110249149479905075174044,
30785525769477805655994251009256770582792548537338581640010273753578382951464,
37,
),
(
{
'to': '0xF0109fC8DF283027b6285cc889F5aA624EaC1F55',
'value': 0,
'gas': 31853,
'gasPrice': 0,
'nonce': 0,
'chainId': 1
},
'0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318',
HexBytes('0xf85d8080827c6d94f0109fc8df283027b6285cc889f5aa624eac1f558080269f22f17b38af35286ffbb0c6376c86ec91c20ecbad93f84913a0cc15e7580cd99f83d6e12e82e3544cb4439964d5087da78f74cefeec9a450b16ae179fd8fe20'), # noqa: E501
HexBytes('0xb0c5e2c6b29eeb0b9c1d63eaa8b0f93c02ead18ae01cb7fc795b0612d3e9d55a'),
61739443115046231975538240097110168545680205678104352478922255527799426265,
232940010090391255679819602567388136081614408698362277324138554019997613600,
38,
),
),
ids=['web3js_hex_str_example', 'web3js_eth_keys.datatypes.PrivateKey_example', '31byte_r_and_s'], # noqa: E501
)
def test_eth_account_sign_transaction(acct, txn, private_key, expected_raw_tx, tx_hash, r, s, v):
signed = acct.signTransaction(txn, private_key)
assert signed.r == r
assert signed.s == s
assert signed.v == v
assert signed.rawTransaction == expected_raw_tx
assert signed.hash == tx_hash
account = acct.privateKeyToAccount(private_key)
assert account.signTransaction(txn) == signed
@pytest.mark.parametrize(
'transaction',
ETH_TEST_TRANSACTIONS,
)
def test_eth_account_sign_transaction_from_eth_test(acct, transaction):
expected_raw_txn = transaction['signed']
key = transaction['key']
unsigned_txn = dissoc(transaction, 'key', 'signed', 'unsigned')
# validate r, in order to validate the transaction hash
# There is some ambiguity about whether `r` will always be deterministically
# generated from the transaction hash and private key, mostly due to code
# author's ignorance. The example test fixtures and implementations seem to agree, so far.
# See ecdsa_raw_sign() in /eth_keys/backends/native/ecdsa.py
signed = acct.signTransaction(unsigned_txn, key)
assert signed.r == to_int(hexstr=expected_raw_txn[-130:-66])
# confirm that signed transaction can be recovered to the sender
expected_sender = acct.privateKeyToAccount(key).address
assert acct.recoverTransaction(signed.rawTransaction) == expected_sender
@pytest.mark.parametrize(
'transaction',
ETH_TEST_TRANSACTIONS,
)
def test_eth_account_recover_transaction_from_eth_test(acct, transaction):
raw_txn = transaction['signed']
key = transaction['key']
expected_sender = acct.privateKeyToAccount(key).address
assert acct.recoverTransaction(raw_txn) == expected_sender
def get_encrypt_test_params():
"""
Params for testing Account#encrypt. Due to not being able to provide fixtures to
pytest.mark.parametrize, we opt for creating the params in a non-fixture method
here instead of providing fixtures for the private key and password.
"""
key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
key_bytes = to_bytes(hexstr=key)
private_key = keys.PrivateKey(HexBytes(key))
password = 'test!'
# 'private_key, password, kdf, iterations, expected_decrypted_key, expected_kdf'
return [
(
key,
password,
None,
None,
key_bytes,
'scrypt'
),
(
private_key,
password,
None,
None,
private_key.to_bytes(),
'scrypt'
),
(
key,
password,
'pbkdf2',
None,
key_bytes,
'pbkdf2'
),
(
key,
password,
None,
1024,
key_bytes,
'scrypt'
),
(
key,
password,
'pbkdf2',
1024,
key_bytes,
'pbkdf2'
),
(
key,
password,
'scrypt',
1024,
key_bytes,
'scrypt'
),
]
@pytest.mark.parametrize(
'private_key, password, kdf, iterations, expected_decrypted_key, expected_kdf',
get_encrypt_test_params(),
ids=[
'hex_str',
'eth_keys.datatypes.PrivateKey',
'hex_str_provided_kdf',
'hex_str_default_kdf_provided_iterations',
'hex_str_pbkdf2_provided_iterations',
'hex_str_scrypt_provided_iterations',
]
)
def test_eth_account_encrypt(
acct,
private_key,
password,
kdf,
iterations,
expected_decrypted_key,
expected_kdf):
if kdf is None:
encrypted = acct.encrypt(private_key, password, iterations=iterations)
else:
encrypted = acct.encrypt(private_key, password, kdf=kdf, iterations=iterations)
assert encrypted['address'] == '2c7536e3605d9c16a7a3d7b1898e529396a65c23'
assert encrypted['version'] == 3
assert encrypted['crypto']['kdf'] == expected_kdf
if iterations is None:
expected_iterations = get_default_work_factor_for_kdf(expected_kdf)
else:
expected_iterations = iterations
if expected_kdf == 'pbkdf2':
assert encrypted['crypto']['kdfparams']['c'] == expected_iterations
elif expected_kdf == 'scrypt':
assert encrypted['crypto']['kdfparams']['n'] == expected_iterations
else:
raise Exception("test must be upgraded to confirm iterations with kdf %s" % expected_kdf)
decrypted_key = acct.decrypt(encrypted, password)
assert decrypted_key == expected_decrypted_key
@pytest.mark.parametrize(
'private_key, password, kdf, iterations, expected_decrypted_key, expected_kdf',
get_encrypt_test_params(),
ids=[
'hex_str',
'eth_keys.datatypes.PrivateKey',
'hex_str_provided_kdf',
'hex_str_default_kdf_provided_iterations',
'hex_str_pbkdf2_provided_iterations',
'hex_str_scrypt_provided_iterations',
]
)
def test_eth_account_prepared_encrypt(
acct,
private_key,
password,
kdf,
iterations,
expected_decrypted_key,
expected_kdf):
account = acct.privateKeyToAccount(private_key)
if kdf is None:
encrypted = account.encrypt(password, iterations=iterations)
else:
encrypted = account.encrypt(password, kdf=kdf, iterations=iterations)
assert encrypted['address'] == '2c7536e3605d9c16a7a3d7b1898e529396a65c23'
assert encrypted['version'] == 3
assert encrypted['crypto']['kdf'] == expected_kdf
if iterations is None:
expected_iterations = get_default_work_factor_for_kdf(expected_kdf)
else:
expected_iterations = iterations
if expected_kdf == 'pbkdf2':
assert encrypted['crypto']['kdfparams']['c'] == expected_iterations
elif expected_kdf == 'scrypt':
assert encrypted['crypto']['kdfparams']['n'] == expected_iterations
else:
raise Exception("test must be upgraded to confirm iterations with kdf %s" % expected_kdf)
decrypted_key = acct.decrypt(encrypted, password)
assert isinstance(decrypted_key, HexBytes)
assert decrypted_key == expected_decrypted_key
|
py | b40a6752af7b8d2d8fc2619df0a82207dcc44538 | N = int(input())
A = 0
B = 0
M = 1
while N != 0:
if N % 10 == 7:
A += 6 * M
B += 1 * M
else:
A += (N % 10) * M
N //= 10
M *= 10
print(A, B)
|
py | b40a67f87507a5482e89480cb97e5712fdef81f9 | import datetime
import hashlib
import json
import re
import time
import zlib
from collections import namedtuple
from sqlalchemy import (
Column,
BigInteger,
Integer,
LargeBinary,
Float,
Boolean,
String,
ForeignKey,
Enum,
ForeignKeyConstraint,
DateTime,
types,
Text,
Index,
JSON,
or_,
and_,
Sequence,
func,
event,
)
from sqlalchemy.orm import relationship, synonym, joinedload
from anchore_engine.utils import ensure_str, ensure_bytes
from anchore_engine.util.rpm import compare_versions as rpm_compare_versions
from anchore_engine.util.deb import compare_versions as dpkg_compare_versions
from anchore_engine.util.apk import compare_versions as apkg_compare_versions
from anchore_engine.util.langpack import compare_versions as langpack_compare_versions
try:
from anchore_engine.subsys import logger as log
except:
import logging
logger = logging.getLogger(__name__)
log = logger
from .common import Base, UtilMixin, StringJSON
from .common import get_thread_scoped_session
DistroTuple = namedtuple("DistroTuple", ["distro", "version", "flavor"])
base_score_key = "base_score"
exploitability_score_key = "exploitability_score"
impact_score_key = "impact_score"
base_metrics_key = "base_metrics"
cvss_v3_key = "cvss_v3"
cvss_v2_key = "cvss_v2"
# Feeds
class FeedMetadata(Base, UtilMixin):
__tablename__ = "feeds"
name = Column(String, primary_key=True)
description = Column(String)
access_tier = Column(Integer)
groups = relationship(
"FeedGroupMetadata", back_populates="feed", cascade="all, delete-orphan"
)
last_full_sync = Column(DateTime)
last_update = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
enabled = Column(Boolean, default=True)
@classmethod
def get_by_name(cls, name):
return FeedMetadata.query.filter(name=name).scalar()
def __repr__(self):
return "<{}(name={}, access_tier={}, enabled={}, created_at={}>".format(
self.__class__,
self.name,
self.access_tier,
self.enabled,
self.created_at.isoformat(),
)
def to_json(self, include_groups=True):
j = super().to_json()
if include_groups:
j["groups"] = [g.to_json() for g in self.groups]
else:
j["groups"] = None
return j
class FeedGroupMetadata(Base, UtilMixin):
__tablename__ = "feed_groups"
name = Column(String, primary_key=True)
feed_name = Column(String, ForeignKey(FeedMetadata.name), primary_key=True)
description = Column(String)
access_tier = Column(Integer)
last_sync = Column(DateTime, nullable=True)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
last_update = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
enabled = Column(Boolean, default=True)
count = Column(
BigInteger
) # To cache the row count of the group between feed syncs to avoid extra row count ops
feed = relationship("FeedMetadata", back_populates="groups")
def __repr__(self):
return (
"<{} name={}, feed={}, access_tier={}, enabled={}, created_at={}>".format(
self.__class__,
self.name,
self.feed_name,
self.access_tier,
self.enabled,
self.created_at,
)
)
def to_json(self, include_feed=False):
j = super().to_json()
if include_feed:
j["feed"] = self.feed.to_json(include_groups=False) # Avoid the loop
else:
j["feed"] = None # Ensure no non-serializable stuff
return j
class GenericFeedDataRecord(Base):
"""
A catch-all record for feed data without a specific schema mapping
"""
__tablename__ = "feed_group_data"
feed = Column(String, primary_key=True)
group = Column(String, primary_key=True)
id = Column(String, primary_key=True)
created_at = Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
updated_at = Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
data = Column(
StringJSON, nullable=False
) # TODO: make this a JSON type for dbs that support it
class GemMetadata(Base):
__tablename__ = "feed_data_gem_packages"
name = Column(String, primary_key=True)
id = Column(BigInteger)
latest = Column(String)
licenses_json = Column(StringJSON)
authors_json = Column(StringJSON)
versions_json = Column(StringJSON)
created_at = Column(
DateTime, default=datetime.datetime.utcnow
) # TODO: make these server-side
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
def __repr__(self):
return "<{} name={}, id={}, created_at={}>".format(
self.__class__, self.name, self.id, self.created_at
)
def key_tuple(self):
return self.name
class NpmMetadata(Base):
__tablename__ = "feed_data_npm_packages"
name = Column(String, primary_key=True)
sourcepkg = Column(String)
lics_json = Column(StringJSON)
origins_json = Column(StringJSON)
latest = Column(String)
versions_json = Column(StringJSON)
created_at = Column(
DateTime, default=datetime.datetime.utcnow
) # TODO: make these server-side
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
def __repr__(self):
return "<{} name={}, sourcepkg={}, created_at={}>".format(
self.__class__, self.name, self.sourcepkg, self.created_at
)
def key_tuple(self):
return self.name
class Vulnerability(Base):
"""
A vulnerability/CVE record. Can come from many sources. Includes some specific fields and also a general
metadata field that is a JSON-encoded string.
"""
__tablename__ = "feed_data_vulnerabilities"
id = Column(String, primary_key=True) # CVE Id, RHSA id, etc
namespace_name = Column(String, primary_key=True) # e.g. centos, rhel, "debian"
severity = Column(
Enum(
"Unknown",
"Negligible",
"Low",
"Medium",
"High",
"Critical",
name="vulnerability_severities",
),
nullable=False,
)
description = Column(Text, nullable=True)
link = Column(String, nullable=True)
metadata_json = Column(StringJSON, nullable=True)
cvss2_vectors = Column(String, nullable=True)
cvss2_score = Column(Float, nullable=True)
created_at = Column(
DateTime, default=datetime.datetime.utcnow
) # TODO: make these server-side
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
vulnerable_in = relationship(
"VulnerableArtifact", back_populates="parent", cascade="all, delete-orphan"
)
fixed_in = relationship(
"FixedArtifact", back_populates="parent", cascade="all, delete-orphan"
)
@property
def additional_metadata(self):
if self.metadata_json:
if isinstance(self.metadata_json, str):
return json.loads(self.metadata_json)
return self.metadata_json
return None
@additional_metadata.setter
def additional_metadata(self, value):
m = {}
if value:
if isinstance(value, str):
m = json.loads(value)
elif isinstance(value, dict):
m.update(value)
else:
m = {"metadata": value}
self.metadata_json = m
# self.metadata_json = json.dumps(value)
def __repr__(self):
return "<{} id={}, namespace_name={}, severity={}, created_at={}>".format(
self.__class__, self.id, self.namespace_name, self.severity, self.created_at
)
def current_package_vulnerabilities(self, db_session):
"""
Return a list of all packages that are marked as vulnerable to this item
:return: list of ImagePackageVulnerability objects
"""
return (
db_session.query(ImagePackageVulnerability)
.filter(
ImagePackageVulnerability.vulnerability_id == self.id,
ImagePackageVulnerability.vulnerability_namespace_name
== self.namespace_name,
)
.all()
)
def is_empty(self):
"""
Can a package be vulnerable to this, or is it an empty definition.
:return: boolean
"""
# return not self.vulnerable_in and not self.fixed_in
return not self.fixed_in
def get_cvss_severity(self):
sev = "Unknown"
try:
score = self.cvss2_score
if score <= 3.9:
sev = "Low"
elif score <= 6.9:
sev = "Medium"
elif score <= 10.0:
sev = "High"
else:
sev = "Unknown"
except:
sev = "Unknown"
return sev
def get_nvd_vulnerabilities(self, cvss_version=3, _nvd_cls=None, _cpe_cls=None):
ret = []
db = get_thread_scoped_session()
if not _nvd_cls or not _cpe_cls:
_nvd_cls, _cpe_cls = select_nvd_classes(db)
try:
cves = self.get_nvd_identifiers(_nvd_cls, _cpe_cls)
nvd_records = db.query(_nvd_cls).filter(_nvd_cls.name.in_(cves)).all()
except Exception as err:
log.warn(
"failed to gather NVD information for vulnerability due to exception: {}".format(
str(err)
)
)
nvd_records = None
if nvd_records:
ret = nvd_records
return ret
def get_nvd_identifiers(self, _nvd_cls, _cpe_cls):
cves = []
try:
if self.id.startswith("CVE-"):
cves = [self.id]
if self.metadata_json and self.metadata_json.get("CVE", []):
for cve_el in self.metadata_json.get("CVE", []):
if type(cve_el) == dict:
# RHSA and ELSA internal elements are dicts
cve_id = cve_el.get("Name", None)
elif type(cve_el) == str:
# ALAS internal elements are just CVE ids
cve_id = cve_el
else:
cve_id = None
if cve_id and cve_id not in cves:
cves.append(cve_id)
except Exception as err:
log.warn(
"failed to gather NVD information for vulnerability due to exception: {}".format(
str(err)
)
)
return cves
class VulnerableArtifact(Base):
"""
An entity affected by a vulnerability, typically an os or application package.
Typically populated by CVEs with specific vulnerable packages enumerated.
"""
__tablename__ = "feed_data_vulnerabilities_vulnerable_artifacts"
vulnerability_id = Column(String, primary_key=True)
namespace_name = Column(String, primary_key=True)
name = Column(String, primary_key=True)
version = Column(String, primary_key=True)
version_format = Column(String)
epochless_version = Column(String)
include_previous_versions = Column(Boolean, default=True)
parent = relationship("Vulnerability", back_populates="vulnerable_in")
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
# This is necessary for ensuring correct FK behavior against a composite foreign key
__table_args__ = (
ForeignKeyConstraint(
columns=(vulnerability_id, namespace_name),
refcolumns=(Vulnerability.id, Vulnerability.namespace_name),
),
{},
)
def __repr__(self):
return "<{} name={}, version={}, vulnerability_id={}, namespace_name={}, created_at={}>".format(
self.__class__,
self.name,
self.version,
self.vulnerability_id,
self.namespace_name,
self.updated_at,
)
def match_and_vulnerable(self, package_obj):
"""
Given this VulnerableArtifact record, determine whether the given package object matches it, indicating that the package is vulnerable.
:param package_obj: an ImagePackage record to check against this vulnerable artifact
:return: True if the package matches and is therefore vulnerable, False otherwise
"""
vuln_obj = self
if not isinstance(vuln_obj, VulnerableArtifact):
raise TypeError(
"Expected a VulnerableArtifact type, got: {}".format(type(vuln_obj))
)
dist = DistroNamespace.for_obj(package_obj)
flavor = dist.flavor
# Double-check names
if (
vuln_obj.name != package_obj.name
and vuln_obj.name != package_obj.normalized_src_pkg
):
log.warn(
"Name mismatch in vulnerable check. This should not happen: Fix: {}, Package: {}, Package_Norm_Src: {}, Package_Src: {}".format(
vuln_obj.name,
package_obj.name,
package_obj.normalized_src_pkg,
package_obj.src_pkg,
)
)
return False
# Is it a catch-all record? Explicit 'None' or 'all' versions indicate all versions of the named package are vulnerable.
# Or is it an exact version match?
if vuln_obj.epochless_version in ["all", "None"] or (
package_obj.fullversion == vuln_obj.epochless_version
or package_obj.version == vuln_obj.epochless_version
):
return True
else:
return False
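# Note (illustrative, not part of the module): an epochless_version of "all" or "None" acts as a
# catch-all, marking every version of the named package vulnerable; otherwise only an exact
# match on version or fullversion marks the package vulnerable.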
class FixedArtifact(Base):
"""
A record indicating an artifact version that marks a fix for a vulnerability
"""
__tablename__ = "feed_data_vulnerabilities_fixed_artifacts"
vulnerability_id = Column(String, primary_key=True)
namespace_name = Column(String, primary_key=True)
name = Column(String, primary_key=True)
version = Column(String, primary_key=True)
version_format = Column(String)
epochless_version = Column(String)
include_later_versions = Column(Boolean, default=True)
parent = relationship("Vulnerability", back_populates="fixed_in")
vendor_no_advisory = Column(Boolean, default=False)
fix_metadata = Column(StringJSON, nullable=True)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
fix_observed_at = Column(DateTime)
__table_args__ = (
ForeignKeyConstraint(
columns=(vulnerability_id, namespace_name),
refcolumns=(Vulnerability.id, Vulnerability.namespace_name),
),
{},
)
@staticmethod
def _fix_observed_at_update(mapper, connection, target):
if not target.fix_observed_at and target.version and target.version != "None":
target.fix_observed_at = datetime.datetime.utcnow()
@classmethod
def __declare_last__(cls):
event.listen(cls, "before_update", cls._fix_observed_at_update)
event.listen(cls, "before_insert", cls._fix_observed_at_update)
def __repr__(self):
return "<{} name={}, version={}, vulnerability_id={}, namespace_name={}, created_at={}>".format(
self.__class__,
self.name,
self.version,
self.vulnerability_id,
self.namespace_name,
self.created_at,
)
def match_but_not_fixed(self, package_obj):
"""
Does the FixedArtifact match the package as a vulnerability such that the fix indicates the package is *not* fixed and is
therefore vulnerable.
:param package_obj: an ImagePackage record
:return: True if the names match and this fix record indicates the package is vulnerable and not yet fixed; False if there is no match or the fix has been applied
"""
fix_obj = self
if not isinstance(fix_obj, FixedArtifact):
raise TypeError(
"Expected a FixedArtifact type, got: {}".format(type(fix_obj))
)
dist = DistroNamespace.for_obj(package_obj)
flavor = dist.flavor
log.spew(
"Package: {}, Fix: {}, Flavor: {}".format(
package_obj.name, fix_obj.name, flavor
)
)
# Double-check names
if (
fix_obj.name != package_obj.name
and fix_obj.name != package_obj.normalized_src_pkg
):
log.warn(
"Name mismatch in fix check. This should not happen: Fix: {}, Package: {}, Package_Norm_Src: {}, Package_Src: {}".format(
fix_obj.name,
package_obj.name,
package_obj.normalized_src_pkg,
package_obj.src_pkg,
)
)
return False
# Handle the case where there is no version, indicating no fix available, all versions are vulnerable.
# Is it a catch-all record? Explicit 'None' versions indicate all versions of the named package are vulnerable.
if fix_obj.version == "None":
return True
# Is the package older than the fix?
if (
flavor == "RHEL"
): # compare full package version with full fixed-in version, epoch handled in compare fn. fixes issue-265
if rpm_compare_versions(package_obj.fullversion, fix_obj.version) < 0:
log.spew(
"rpm Compared: {} < {}: True".format(
package_obj.fullversion, fix_obj.version
)
)
return True
elif (
flavor == "DEB"
): # compare full package version with full fixed-in version, epoch handled in compare fn. fixes issue-265
if dpkg_compare_versions(package_obj.fullversion, "lt", fix_obj.version):
log.spew(
"dpkg Compared: {} < {}: True".format(
package_obj.fullversion, fix_obj.version
)
)
return True
elif (
flavor == "ALPINE"
): # compare full package version with epochless fixed-in version
if apkg_compare_versions(
package_obj.fullversion, "lt", fix_obj.epochless_version
):
log.spew(
"apkg Compared: {} < {}: True".format(
package_obj.fullversion, fix_obj.epochless_version
)
)
return True
if package_obj.pkg_type in ["java", "maven", "npm", "gem", "python", "js"]:
if package_obj.pkg_type in ["java", "maven"]:
pomprops = package_obj.get_pom_properties()
if pomprops:
pkgkey = "{}:{}".format(
pomprops.get("groupId"), pomprops.get("artifactId")
)
pkgversion = pomprops.get("version", None)
else:
pkgversion = package_obj.version
else:
pkgversion = package_obj.fullversion
if langpack_compare_versions(
fix_obj.version, pkgversion, language=package_obj.pkg_type
):
return True
# Newer or the same
return False
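# Sketch (assumption, not part of the module): the flavor-specific comparisons above reduce to
# calls like these; a package version older than the fix version means it is still vulnerable.
# Version strings are made up for illustration.
def _example_fix_version_comparisons():
    rhel_vulnerable = rpm_compare_versions("1.0-1.el7", "1.0-2.el7") < 0  # expected: True
    deb_vulnerable = dpkg_compare_versions("1.0-1", "lt", "1.0-2")        # expected: True
    apk_vulnerable = apkg_compare_versions("1.0-r0", "lt", "1.0-r1")      # expected: True
    return rhel_vulnerable, deb_vulnerable, apk_vulnerable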
class NvdMetadata(Base):
__tablename__ = "feed_data_nvd_vulnerabilities"
name = Column(String, primary_key=True)
namespace_name = Column(String, primary_key=True) # e.g. nvddb:2018"
severity = Column(
Enum(
"Unknown",
"Negligible",
"Low",
"Medium",
"High",
"Critical",
name="vulnerability_severities",
),
nullable=False,
primary_key=True,
)
vulnerable_configuration = Column(StringJSON)
vulnerable_software = Column(StringJSON)
summary = Column(String)
cvss = Column(StringJSON)
cvssv3 = None
cvssv2 = None
vulnerable_cpes = relationship(
"CpeVulnerability", back_populates="parent", cascade="all, delete-orphan"
)
created_at = Column(
DateTime, default=datetime.datetime.utcnow
) # TODO: make these server-side
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
def __repr__(self):
return "<{} name={}, created_at={}>".format(
self.__class__, self.name, self.created_at
)
@property
def normalized_id(self):
return self.name
@property
def description(self):
return self.summary if self.summary else ""
@property
def references(self):
return []
def get_max_base_score_nvd(self, cvss_version=3):
if cvss_version == 3:
score = None
elif cvss_version == 2:
score = self.cvss.get(base_metrics_key, {}).get("score", None)
else:
log.warn(
"invalid cvss version specified as input ({})".format(cvss_version)
)
score = None
if score is None:
ret = -1.0
else:
try:
ret = float(score)
except:
ret = -1.0
return ret
def get_max_exploitability_score_nvd(self, cvss_version=3):
return -1.0
def get_max_impact_score_nvd(self, cvss_version=3):
return -1.0
def get_max_cvss_score_nvd(self, cvss_version=3):
if cvss_version == 3:
ret = {
base_score_key: -1.0,
exploitability_score_key: -1.0,
impact_score_key: -1.0,
}
elif cvss_version == 2:
ret = {
base_score_key: self.get_max_base_score_nvd(cvss_version),
exploitability_score_key: -1.0,
impact_score_key: -1.0,
}
else:
log.warn(
"invalid cvss version specified as input ({})".format(cvss_version)
)
ret = {
base_score_key: -1.0,
exploitability_score_key: -1.0,
impact_score_key: -1.0,
}
return ret
def get_cvss_scores_nvd(self):
ret = [
{
"id": self.name,
cvss_v2_key: self.get_max_cvss_score_nvd(cvss_version=2),
cvss_v3_key: self.get_max_cvss_score_nvd(cvss_version=3),
}
]
return ret
def get_cvss_data_nvd(self):
ret = [
{
"id": self.name,
cvss_v2_key: self.cvssv2 if self.cvssv2 else None,
cvss_v3_key: self.cvssv3 if self.cvssv3 else None,
}
]
return ret
# vendor scores
def get_max_base_score_vendor(self, cvss_version=3):
return -1.0
def get_max_exploitability_score_vendor(self, cvss_version=3):
return -1.0
def get_max_impact_score_vendor(self, cvss_version=3):
return -1.0
def get_max_cvss_score_vendor(self, cvss_version=3):
ret = {
base_score_key: -1.0,
exploitability_score_key: -1.0,
impact_score_key: -1.0,
}
return ret
def get_cvss_scores_vendor(self):
return []
def get_cvss_data_vendor(self):
return []
@property
def link(self):
return "https://nvd.nist.gov/vuln/detail/{}".format(self.name)
def key_tuple(self):
return self.name
class NvdV2Metadata(Base):
__tablename__ = "feed_data_nvdv2_vulnerabilities"
name = Column(String, primary_key=True)
namespace_name = Column(String, primary_key=True) # e.g. nvddb:2018"
severity = Column(
Enum(
"Unknown",
"Negligible",
"Low",
"Medium",
"High",
"Critical",
name="vulnerability_severities",
),
nullable=False,
index=True,
)
description = Column(String, nullable=True)
cvss_v2 = Column(JSON, nullable=True)
cvss_v3 = Column(JSON, nullable=True)
link = Column(String, nullable=True)
references = Column(JSON, nullable=True)
vulnerable_cpes = relationship(
"CpeV2Vulnerability", back_populates="parent", cascade="all, delete-orphan"
)
created_at = Column(
DateTime, default=datetime.datetime.utcnow
) # TODO: make these server-side
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
def __repr__(self):
return "<{} name={}, created_at={}>".format(
self.__class__, self.name, self.created_at
)
@property
def normalized_id(self):
return self.name
def _get_score(self, metric, score_key):
if metric:
score = metric.get(base_metrics_key).get(score_key, -1.0)
try:
score = float(score)
except:
score = -1.0
else:
score = -1.0
return score
def _get_metric(self, cvss_version=3):
metric = None
if cvss_version == 3:
metric = self.cvss_v3
elif cvss_version == 2:
metric = self.cvss_v2
else:
log.warn(
"invalid cvss version specified as input ({})".format(cvss_version)
)
return metric
def get_max_base_score_nvd(self, cvss_version=3):
metric = self._get_metric(cvss_version)
return self._get_score(metric, base_score_key)
def get_max_exploitability_score_nvd(self, cvss_version=3):
metric = self._get_metric(cvss_version)
return self._get_score(metric, exploitability_score_key)
def get_max_impact_score_nvd(self, cvss_version=3):
metric = self._get_metric(cvss_version)
return self._get_score(metric, impact_score_key)
def get_max_cvss_score_nvd(self, cvss_version=3):
metric = self._get_metric(cvss_version)
ret = {
base_score_key: self._get_score(metric, base_score_key),
exploitability_score_key: self._get_score(metric, exploitability_score_key),
impact_score_key: self._get_score(metric, impact_score_key),
}
return ret
def get_cvss_scores_nvd(self):
ret = [
{
"id": self.name,
cvss_v2_key: self.get_max_cvss_score_nvd(cvss_version=2),
cvss_v3_key: self.get_max_cvss_score_nvd(cvss_version=3),
}
]
return ret
def get_cvss_data_nvd(self):
ret = [
{
"id": self.name,
cvss_v2_key: self._get_metric(cvss_version=2),
cvss_v3_key: self._get_metric(cvss_version=3),
}
]
return ret
# vendor scores
def get_max_base_score_vendor(self, cvss_version=3):
return -1.0
def get_max_exploitability_score_vendor(self, cvss_version=3):
return -1.0
def get_max_impact_score_vendor(self, cvss_version=3):
return -1.0
def get_max_cvss_score_vendor(self, cvss_version=3):
ret = {
base_score_key: -1.0,
exploitability_score_key: -1.0,
impact_score_key: -1.0,
}
return ret
def get_cvss_scores_vendor(self):
return []
def get_cvss_data_vendor(self):
return []
def key_tuple(self):
return self.name
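# Sketch (assumption, not part of the module): the JSON shape the NvdV2Metadata scoring helpers
# expect, mirroring the base_metrics/base_score access pattern above. Values are made up.
def _example_nvdv2_max_score():
    rec = NvdV2Metadata(name="CVE-0000-0000", namespace_name="nvdv2:cves", severity="High")
    rec.cvss_v3 = {
        "base_metrics": {
            "base_score": 8.1,
            "exploitability_score": 2.2,
            "impact_score": 6.0,
        }
    }
    return rec.get_max_cvss_score_nvd(cvss_version=3)  # {'base_score': 8.1, ...}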
class VulnDBMetadata(Base):
__tablename__ = "feed_data_vulndb_vulnerabilities"
name = Column(String, primary_key=True)
namespace_name = Column(String, primary_key=True) # e.g. vulndb:vulnerabilities
severity = Column(
Enum(
"Unknown",
"Negligible",
"Low",
"Medium",
"High",
"Critical",
name="vulnerability_severities",
),
nullable=False,
index=True,
)
title = Column(String, nullable=True)
description = Column(String, nullable=True)
solution = Column(String, nullable=True)
vendor_product_info = Column(JSON, nullable=True)
references = Column(JSON, nullable=True)
vulnerable_packages = Column(JSON, nullable=True)
vulnerable_libraries = Column(JSON, nullable=True)
vendor_cvss_v2 = Column(JSON, nullable=True)
vendor_cvss_v3 = Column(JSON, nullable=True)
nvd = Column(JSON, nullable=True)
vuln_metadata = Column(JSON, nullable=True)
cpes = relationship(
"VulnDBCpe", back_populates="parent", cascade="all, delete-orphan"
)
# unaffected_cpes = relationship('VulnDBUnaffectedCpe', back_populates='parent', cascade='all, delete-orphan')
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
def __repr__(self):
return "<{} name={}, created_at={}>".format(
self.__class__, self.name, self.created_at
)
@property
def normalized_id(self):
"""normalized_id will inspect the in coming external
references and return a cve id in the case of a single
match against vulndb information.
"""
res = _get_one_or_none("source", "CVE ID", self.references)
if res and res.get("url"):
# findall should return a single id list ['2020-11989']
cve_id_col = re.findall(r"\=(\d+\-\d+)", res.get("url"))
if cve_id_col:
return "CVE-" + cve_id_col[0]
return self.name
def _get_max_cvss_v3_metric_nvd(self):
cvss_v3 = None
if self.nvd:
if len(self.nvd) == 1:
cvss_v3 = self.nvd[0].get(cvss_v3_key, None)
else:
max_score = None
for nvd_item in self.nvd:
if nvd_item.get(cvss_v3_key, None):
if (
not max_score
or nvd_item.get(cvss_v3_key)
.get(base_metrics_key)
.get(base_score_key)
> max_score
):
max_score = (
nvd_item.get(cvss_v3_key)
.get(base_metrics_key)
.get(base_score_key)
)
cvss_v3 = nvd_item.get(cvss_v3_key)
else:
continue
return cvss_v3
def _get_max_cvss_v2_metric_nvd(self):
cvss_v2 = None
if self.nvd:
if len(self.nvd) == 1:
cvss_v2 = self.nvd[0].get(cvss_v2_key, None)
else:
max_score = None
for nvd_item in self.nvd:
if nvd_item.get(cvss_v2_key, None):
if (
not max_score
or nvd_item.get(cvss_v2_key)
.get(base_metrics_key)
.get(base_score_key)
> max_score
):
max_score = (
nvd_item.get(cvss_v2_key)
.get(base_metrics_key)
.get(base_score_key)
)
cvss_v2 = nvd_item.get(cvss_v2_key)
else:
continue
return cvss_v2
def _get_max_cvss_metric_nvd(self, cvss_version):
"""
[
{
"id": "CVE-2019-5440",
"cvss_v2": {
"version": "2.0",
"vector_string": "AV:N/AC:M/Au:N/C:P/I:P/A:P",
"base_metrics": {
"base_score": 6.8,
"exploitability_score": 8.6,
"impact_score": 6.4,
"severity": "Medium"
...
}
},
"cvss_v3": {
"version": "3.0",
"vector_string": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H",
"base_metrics": {
"base_score": 8.1,
"exploitability_score": 2.2,
"impact_score": 6.0,
"severity": "High"
...
}
}
},
{
"id": "CVE-2019-5441",
"cvss_v2": {
"version": "2.0",
"vector_string": "AV:N/AC:M/Au:N/C:P/I:P/A:P",
"base_metrics": {
"base_score": 6.8,
"exploitability_score": 8.6,
"impact_score": 6.4,
"severity": "Medium"
...
}
},
"cvss_v3": {
"version": "3.0",
"vector_string": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H",
"base_metrics": {
"base_score": 8.1,
"exploitability_score": 2.2,
"impact_score": 6.0,
"severity": "High"
...
}
}
},
]
:param cvss_version:
:return:
"""
metric = None
if cvss_version == 3:
metric = self._get_max_cvss_v3_metric_nvd()
elif cvss_version == 2:
metric = self._get_max_cvss_v2_metric_nvd()
else:
log.warning(
"invalid cvss version specified as input ({})".format(cvss_version)
)
return metric
def _get_max_cvss_v3_metric_rbs(self):
cvss_v3 = None
if self.vendor_cvss_v3:
if len(self.vendor_cvss_v3) == 1:
cvss_v3 = self.vendor_cvss_v3[0]
else:
max_score = None
for cvss_item in self.vendor_cvss_v3:
if (
not max_score
or cvss_item.get(base_metrics_key).get(base_score_key)
> max_score
):
max_score = cvss_item.get(base_metrics_key).get(base_score_key)
cvss_v3 = cvss_item
else:
continue
return cvss_v3
def _get_highest_cvss_v2_rbs(self):
cvss_v2 = None
if self.vendor_cvss_v2:
if len(self.vendor_cvss_v2) == 1:
cvss_v2 = self.vendor_cvss_v2[0]
else:
max_score = None
for cvss_item in self.vendor_cvss_v2:
if (
not max_score
or cvss_item.get(base_metrics_key).get(base_score_key)
> max_score
):
max_score = cvss_item.get(base_metrics_key).get(base_score_key)
cvss_v2 = cvss_item
else:
continue
return cvss_v2
def _get_max_cvss_metric_rbs(self, cvss_version):
"""
        vendor_cvss_v2 / vendor_cvss_v3 is a list in the format
[
{
"version": "3.0",
"vector_string": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H",
"base_metrics": {
"base_score": 8.1,
"exploitability_score": 2.2,
"impact_score": 6.0,
"severity": "High"
...
}
},
{
"version": "3.0",
"vector_string": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H",
"base_metrics": {
"base_score": 8.1,
"exploitability_score": 2.2,
"impact_score": 6.0,
"severity": "High"
...
}
}
]
:param cvss_version:
:return:
"""
metric = None
if cvss_version == 3:
metric = self._get_max_cvss_v3_metric_rbs()
elif cvss_version == 2:
metric = self._get_highest_cvss_v2_rbs()
else:
log.warning(
"invalid cvss version specified as input ({})".format(cvss_version)
)
return metric
def _get_score(self, metric, score_key):
if metric:
score = metric.get(base_metrics_key).get(score_key, -1.0)
try:
score = float(score)
except:
score = -1.0
else:
score = -1.0
return score
# nvd scores
def get_max_base_score_nvd(self, cvss_version=3):
metric = self._get_max_cvss_metric_nvd(cvss_version)
return self._get_score(metric, base_score_key)
def get_max_exploitability_score_nvd(self, cvss_version=3):
metric = self._get_max_cvss_metric_nvd(cvss_version)
return self._get_score(metric, exploitability_score_key)
def get_max_impact_score_nvd(self, cvss_version=3):
metric = self._get_max_cvss_metric_nvd(cvss_version)
return self._get_score(metric, impact_score_key)
def get_max_cvss_score_nvd(self, cvss_version=3):
metric = self._get_max_cvss_metric_nvd(cvss_version)
ret = {
base_score_key: self._get_score(metric, base_score_key),
exploitability_score_key: self._get_score(metric, exploitability_score_key),
impact_score_key: self._get_score(metric, impact_score_key),
}
return ret
def get_cvss_scores_nvd(self):
result = []
for nvd_cvss_item in self.get_cvss_data_nvd():
"""
nvd_cvss_item is in the format
{
"id": "CVE-2019-5441",
"cvss_v2": {
"version": "2.0",
"vector_string": "AV:N/AC:M/Au:N/C:P/I:P/A:P",
"base_metrics": {
"base_score": 6.8,
"exploitability_score": 8.6,
"impact_score": 6.4,
"severity": "Medium"
...
}
},
"cvss_v3": {
"version": "3.0",
"vector_string": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H",
"base_metrics": {
"base_score": 8.1,
"exploitability_score": 2.2,
"impact_score": 6.0,
"severity": "High"
...
}
}
}
"""
cvss_v2_metric = nvd_cvss_item.get(cvss_v2_key, None)
cvss_v3_metric = nvd_cvss_item.get(cvss_v3_key, None)
score_item = {
"id": nvd_cvss_item.get("id"),
cvss_v2_key: {
base_score_key: self._get_score(cvss_v2_metric, base_score_key),
exploitability_score_key: self._get_score(
cvss_v2_metric, exploitability_score_key
),
impact_score_key: self._get_score(cvss_v2_metric, impact_score_key),
},
cvss_v3_key: {
base_score_key: self._get_score(cvss_v3_metric, base_score_key),
exploitability_score_key: self._get_score(
cvss_v3_metric, exploitability_score_key
),
impact_score_key: self._get_score(cvss_v3_metric, impact_score_key),
},
}
            result.append(score_item)
        return result
def get_cvss_data_nvd(self):
return self.nvd if self.nvd else []
# vendor scores
def get_max_base_score_vendor(self, cvss_version=3):
metric = self._get_max_cvss_metric_rbs(cvss_version)
return self._get_score(metric, base_score_key)
def get_max_exploitability_score_vendor(self, cvss_version=3):
metric = self._get_max_cvss_metric_rbs(cvss_version)
return self._get_score(metric, exploitability_score_key)
def get_max_impact_score_vendor(self, cvss_version=3):
metric = self._get_max_cvss_metric_rbs(cvss_version)
return self._get_score(metric, impact_score_key)
def get_max_cvss_score_vendor(self, cvss_version=3):
metric = self._get_max_cvss_metric_rbs(cvss_version)
ret = {
base_score_key: self._get_score(metric, base_score_key),
exploitability_score_key: self._get_score(metric, exploitability_score_key),
impact_score_key: self._get_score(metric, impact_score_key),
}
return ret
def get_cvss_scores_vendor(self):
results = []
if self.vendor_cvss_v2:
for cvss_v2_item in self.vendor_cvss_v2:
results.append(
{
"id": self.name,
cvss_v2_key: {
base_score_key: self._get_score(
cvss_v2_item, base_score_key
),
exploitability_score_key: self._get_score(
cvss_v2_item, exploitability_score_key
),
impact_score_key: self._get_score(
cvss_v2_item, impact_score_key
),
},
cvss_v3_key: {
base_score_key: -1.0,
exploitability_score_key: -1.0,
impact_score_key: -1.0,
},
}
)
if self.vendor_cvss_v3:
for cvss_v3_item in self.vendor_cvss_v3:
results.append(
{
"id": self.name,
cvss_v2_key: {
base_score_key: -1.0,
exploitability_score_key: -1.0,
impact_score_key: -1.0,
},
cvss_v3_key: {
base_score_key: self._get_score(
cvss_v3_item, base_score_key
),
exploitability_score_key: self._get_score(
cvss_v3_item, exploitability_score_key
),
impact_score_key: self._get_score(
cvss_v3_item, impact_score_key
),
},
}
)
return results
def get_cvss_data_vendor(self):
results = []
if self.vendor_cvss_v2:
for cvss_v2_item in self.vendor_cvss_v2:
results.append(
{"id": self.name, cvss_v2_key: cvss_v2_item, cvss_v3_key: None}
)
if self.vendor_cvss_v3:
for cvss_v3_item in self.vendor_cvss_v3:
results.append(
{"id": self.name, cvss_v2_key: None, cvss_v3_key: cvss_v3_item}
)
return results
@property
def link(self):
return None
@property
def vulnerable_cpes(self):
return [cpe for cpe in self.cpes if cpe.is_affected]
def key_tuple(self):
return self.name
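# Illustrative sketch, not used by the application: shows how the max-score helpers above walk
# the stored JSON and how normalized_id recovers a CVE id from a single 'CVE ID' reference.
# Every value in the payload is hypothetical; the dicts are keyed with the module-level *_key
# constants so the shape matches whatever field names the feed actually uses, and only
# in-memory objects are involved (no database session).
def _example_vulndb_metadata_scores():
    record = VulnDBMetadata(
        name="VULNDB-EXAMPLE-1",
        namespace_name="vulndb:vulnerabilities",
        severity="High",
        references=[
            {"source": "CVE ID", "url": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=2020-11989"}
        ],
        nvd=[
            {
                "id": "CVE-2020-11989",
                cvss_v2_key: {base_metrics_key: {base_score_key: 6.8, exploitability_score_key: 8.6, impact_score_key: 6.4}},
                cvss_v3_key: {base_metrics_key: {base_score_key: 8.1, exploitability_score_key: 2.2, impact_score_key: 6.0}},
            }
        ],
        vendor_cvss_v3=[
            {base_metrics_key: {base_score_key: 7.5, exploitability_score_key: 3.9, impact_score_key: 3.6}}
        ],
    )
    assert record.normalized_id == "CVE-2020-11989"
    # the v3 base score 8.1 comes from the single nvd entry; 7.5 comes from the vendor cvss_v3 list
    return record.get_max_cvss_score_nvd(cvss_version=3), record.get_max_base_score_vendor(cvss_version=3)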
class CpeVulnerability(Base):
__tablename__ = "feed_data_cpe_vulnerabilities"
feed_name = Column(String, primary_key=True)
namespace_name = Column(String, primary_key=True)
vulnerability_id = Column(String, primary_key=True)
severity = Column(
Enum(
"Unknown",
"Negligible",
"Low",
"Medium",
"High",
"Critical",
name="vulnerability_severities",
),
nullable=False,
primary_key=True,
)
cpetype = Column(String, primary_key=True)
vendor = Column(String, primary_key=True)
name = Column(String, primary_key=True)
version = Column(String, primary_key=True)
update = Column(String, primary_key=True)
meta = Column(String, primary_key=True)
link = Column(String, nullable=True)
parent = relationship("NvdMetadata", back_populates="vulnerable_cpes")
created_at = Column(
DateTime, default=datetime.datetime.utcnow
) # TODO: make these server-side
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
# This is necessary for ensuring correct FK behavior against a composite foreign key
__table_args__ = (
ForeignKeyConstraint(
columns=(vulnerability_id, namespace_name, severity),
refcolumns=(
NvdMetadata.name,
NvdMetadata.namespace_name,
NvdMetadata.severity,
),
),
Index("ix_feed_data_cpe_vulnerabilities_name_version", name, version),
Index(
"ix_feed_data_cpe_vulnerabilities_fk",
vulnerability_id,
namespace_name,
severity,
),
{},
)
def __repr__(self):
return "<{} feed_name={}, vulnerability_id={}, name={}, version={}, created_at={}>".format(
self.__class__,
self.feed_name,
self.vulnerability_id,
self.name,
self.version,
self.created_at.isoformat(),
)
def get_cpestring(self):
ret = None
try:
final_cpe = ["cpe", "-", "-", "-", "-", "-", "-"]
final_cpe[1] = self.cpetype
final_cpe[2] = self.vendor
final_cpe[3] = self.name
final_cpe[4] = self.version
final_cpe[5] = self.update
final_cpe[6] = self.meta
ret = ":".join(final_cpe)
except:
ret = None
return ret
def get_fixed_in(self):
return []
class CpeV2Vulnerability(Base):
__tablename__ = "feed_data_cpev2_vulnerabilities"
feed_name = Column(String, primary_key=True)
namespace_name = Column(String, primary_key=True)
vulnerability_id = Column(String, primary_key=True)
part = Column(String, primary_key=True)
vendor = Column(String, primary_key=True)
product = Column(String, primary_key=True)
name = synonym("product")
version = Column(String, primary_key=True)
update = Column(String, primary_key=True)
edition = Column(String, primary_key=True)
language = Column(String, primary_key=True)
sw_edition = Column(String, primary_key=True)
target_sw = Column(String, primary_key=True)
target_hw = Column(String, primary_key=True)
other = Column(String, primary_key=True)
parent = relationship("NvdV2Metadata", back_populates="vulnerable_cpes")
created_at = Column(
DateTime, default=datetime.datetime.utcnow
) # TODO: make these server-side
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
# This is necessary for ensuring correct FK behavior against a composite foreign key
__table_args__ = (
ForeignKeyConstraint(
columns=(vulnerability_id, namespace_name),
refcolumns=(NvdV2Metadata.name, NvdV2Metadata.namespace_name),
),
Index("ix_feed_data_cpev2_vulnerabilities_name_version", product, version),
Index(
"ix_feed_data_cpev2_vulnerabilities_fk", vulnerability_id, namespace_name
),
{},
)
def __repr__(self):
return "<{} feed_name={}, vulnerability_id={}, product={}, version={}, created_at={}>".format(
self.__class__,
self.feed_name,
self.vulnerability_id,
self.product,
self.version,
self.created_at.isoformat(),
)
def get_cpestring(self):
ret = None
try:
final_cpe = ["cpe", "-", "-", "-", "-", "-", "-"]
final_cpe[1] = "/" + self.part
final_cpe[2] = self.vendor
final_cpe[3] = self.product
final_cpe[4] = self.version
final_cpe[5] = self.update
final_cpe[6] = self.other
ret = ":".join(final_cpe)
except:
ret = None
return ret
def get_cpe23string(self):
ret = None
try:
final_cpe = [
"cpe",
"2.3",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
final_cpe[2] = self.part
final_cpe[3] = self.vendor
final_cpe[4] = self.product
final_cpe[5] = self.version
final_cpe[6] = self.update
final_cpe[7] = self.edition
final_cpe[8] = self.language
final_cpe[9] = self.sw_edition
final_cpe[10] = self.target_sw
final_cpe[11] = self.target_hw
final_cpe[12] = self.other
ret = ":".join(final_cpe)
except:
ret = None
return ret
def get_fixed_in(self):
return []
class VulnDBCpe(Base):
__tablename__ = "feed_data_vulndb_cpes"
feed_name = Column(String, primary_key=True)
namespace_name = Column(String, primary_key=True)
vulnerability_id = Column(String, primary_key=True)
part = Column(String, primary_key=True)
vendor = Column(String, primary_key=True)
product = Column(String, primary_key=True)
name = synonym("product")
version = Column(String, primary_key=True)
update = Column(String, primary_key=True)
edition = Column(String, primary_key=True)
language = Column(String, primary_key=True)
sw_edition = Column(String, primary_key=True)
target_sw = Column(String, primary_key=True)
target_hw = Column(String, primary_key=True)
other = Column(String, primary_key=True)
is_affected = Column(Boolean, primary_key=True)
parent = relationship("VulnDBMetadata", back_populates="cpes")
created_at = Column(
DateTime, default=datetime.datetime.utcnow
) # TODO: make these server-side
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
# This is necessary for ensuring correct FK behavior against a composite foreign key
__table_args__ = (
ForeignKeyConstraint(
columns=(vulnerability_id, namespace_name),
refcolumns=(VulnDBMetadata.name, VulnDBMetadata.namespace_name),
),
Index("ix_feed_data_vulndb_affected_cpes_product_version", product, version),
Index("ix_feed_data_vulndb_affected_cpes_fk", vulnerability_id, namespace_name),
{},
)
def __repr__(self):
return "<{} feed_name={}, vulnerability_id={}, product={}, version={}, created_at={}>".format(
self.__class__,
self.feed_name,
self.vulnerability_id,
self.product,
self.version,
self.created_at.isoformat(),
)
def get_cpestring(self):
ret = None
try:
if self.sw_edition or self.target_sw or self.target_hw or self.other:
edition = "~{}~{}~{}~{}~{}".format(
self.edition,
self.sw_edition,
self.target_sw,
self.target_hw,
self.other,
)
else:
edition = self.edition
uri_parts = [
"cpe",
"/" + self.part,
self.vendor,
self.product,
self.version,
self.update,
edition,
self.language,
]
uri = ":".join(uri_parts)
ret = uri.strip(":") # remove any trailing :
except:
ret = None
return ret
def get_cpe23string(self):
ret = None
try:
final_cpe = [
"cpe",
"2.3",
self.part,
self.vendor,
self.product,
self.version,
self.update,
self.edition,
self.language,
self.sw_edition,
self.target_sw,
self.target_hw,
self.other,
]
ret = ":".join(final_cpe)
except:
ret = None
return ret
def get_fixed_in(self):
return [
cpe.version
for cpe in self.parent.cpes
if not cpe.is_affected
and cpe.product == self.product
and cpe.vendor == self.vendor
and cpe.part == self.part
]
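# Illustrative sketch, not used by the application: assembling the CPE 2.3 string for an affected
# VulnDBCpe row and deriving fixed-in versions from a sibling unaffected row of the same product.
# All attribute values are hypothetical and only in-memory objects are used (no database session).
def _example_vulndb_cpe_strings():
    parent = VulnDBMetadata(
        name="VULNDB-EXAMPLE-2", namespace_name="vulndb:vulnerabilities", severity="Medium"
    )
    affected = VulnDBCpe(
        part="a", vendor="example", product="widget", version="1.0.0", update="*", edition="*",
        language="*", sw_edition="*", target_sw="*", target_hw="*", other="*", is_affected=True,
    )
    fixed = VulnDBCpe(
        part="a", vendor="example", product="widget", version="1.0.1", update="*", edition="*",
        language="*", sw_edition="*", target_sw="*", target_hw="*", other="*", is_affected=False,
    )
    parent.cpes.extend([affected, fixed])
    cpe23 = affected.get_cpe23string()  # 'cpe:2.3:a:example:widget:1.0.0:*:*:*:*:*:*:*'
    fixed_versions = affected.get_fixed_in()  # ['1.0.1']
    return cpe23, fixed_versions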
# Analysis Data for Images
class ImagePackage(Base):
"""
A package detected in an image by analysis
"""
__tablename__ = "image_packages"
image_id = Column(String, primary_key=True)
image_user_id = Column(String, primary_key=True)
name = Column(String, primary_key=True)
version = Column(String, primary_key=True)
pkg_type = Column(String, primary_key=True) # RHEL, DEB, APK, etc.
arch = Column(String, default="N/A", primary_key=True)
pkg_path = Column(String, default="pkgdb", primary_key=True)
pkg_path_hash = Column(String) # The sha256 hash of the path in hex
# Could pkg namespace be diff than os? e.g. rpms in Deb?
distro_name = Column(String)
distro_version = Column(String)
like_distro = Column(String)
fullversion = Column(String)
release = Column(String, default="")
origin = Column(String, default="N/A")
src_pkg = Column(String, default="N/A")
normalized_src_pkg = Column(String, default="N/A")
metadata_json = Column(StringJSON)
license = Column(String, default="N/A")
size = Column(BigInteger, nullable=True)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
vulnerabilities = relationship(
"ImagePackageVulnerability", back_populates="package", lazy="dynamic"
)
image = relationship("Image", back_populates="packages")
pkg_db_entries = relationship(
"ImagePackageManifestEntry",
backref="package",
lazy="dynamic",
cascade=["all", "delete", "delete-orphan"],
)
__table_args__ = (
ForeignKeyConstraint(
columns=(image_id, image_user_id),
refcolumns=("images.id", "images.user_id"),
),
Index(
"ix_image_package_distronamespace",
name,
version,
distro_name,
distro_version,
normalized_src_pkg,
),
# TODO: add this index for feed sync performance, needs to be re-tested with new package usage
# Index('ix_image_package_distro_pkgs', distro_name, distro_version, name, normalized_src_pkg, version),
{},
)
_distro_namespace = None
@property
def distro_namespace_meta(self):
if not self._distro_namespace:
self._distro_namespace = DistroNamespace.for_obj(self)
return self._distro_namespace
@property
def distro_namespace(self):
if self.distro_name and self.distro_version:
return self.distro_name + ":" + self.distro_version
else:
return None
def get_pom_properties(self):
if not self.metadata_json:
return None
filebuf = self.metadata_json.get("pom.properties", "")
props = {}
for line in filebuf.splitlines():
# line = anchore_engine.utils.ensure_str(line)
if not re.match(r"\s*(#.*)?$", line):
kv = line.split("=")
key = kv[0].strip()
value = "=".join(kv[1:]).strip()
props[key] = value
return props
def find_vulnerabilities(self):
"""
        Return the vulnerabilities that this ImagePackage matches.
:return: list of Vulnerability objects
"""
# ts = time.time()
# package_obj = self
log.debug(
"Finding vulnerabilities for package: {} - {}".format(
self.name, self.version
)
)
matches = []
db = get_thread_scoped_session()
# decide what type of scan(s) to perform
do_langscan = do_osscan = False
pkgkey = pkgversion = None
likematch = None
if self.pkg_type in ["java", "maven"]:
# search for maven hits
if self.metadata_json:
pombuf = self.metadata_json.get("pom.properties", "")
if pombuf:
pomprops = self.get_pom_properties()
pkgkey = "{}:{}".format(
pomprops.get("groupId"), pomprops.get("artifactId")
)
pkgversion = pomprops.get("version", None)
likematch = "%java%"
do_langscan = True
elif self.pkg_type in [
"ruby",
"gem",
"npm",
"js",
"python",
"nuget",
"dotnet",
"binary",
"go",
]:
pkgkey = self.name
pkgversion = self.version
if self.pkg_type in ["ruby", "gem"]:
likematch = "%gem%"
do_langscan = True
elif self.pkg_type in ["npm", "js"]:
likematch = "%npm%"
do_langscan = True
elif self.pkg_type in ["python"]:
likematch = "%python%"
do_langscan = True
elif self.pkg_type in ["nuget", "dotnet"]:
likematch = "%nuget%"
do_langscan = True
elif self.pkg_type in ["go"]:
likematch = "%go%"
do_langscan = True
elif self.pkg_type in ["binary"]:
likematch = "%binary%"
do_langscan = True
else:
do_osscan = True
if do_langscan:
semvercount = (
db.query(FixedArtifact)
.filter(FixedArtifact.version_format == "semver")
.count()
)
if semvercount:
nslang = self.pkg_type
log.debug(
"performing LANGPACK vuln scan {} - {}".format(pkgkey, pkgversion)
)
if pkgkey and pkgversion and likematch:
candidates = (
db.query(FixedArtifact)
.filter(FixedArtifact.name == pkgkey)
.filter(FixedArtifact.version_format == "semver")
.filter(FixedArtifact.namespace_name.like(likematch))
)
for candidate in candidates:
if (
candidate.vulnerability_id
not in [x.vulnerability_id for x in matches]
) and (
langpack_compare_versions(
candidate.version, pkgversion, language=nslang
)
):
matches.append(candidate)
if do_osscan:
log.debug("performing OS vuln scan {} - {}".format(self.name, self.version))
dist = DistroNamespace(
self.distro_name, self.distro_version, self.like_distro
)
namespace_name_to_use = dist.namespace_name
# All options are the same, no need to loop
if len(set(dist.like_namespace_names)) > 1:
# Look for exact match first
if (
not db.query(FeedGroupMetadata)
.filter(FeedGroupMetadata.name == dist.namespace_name)
.first()
):
# Check all options for distro/flavor mappings, stop at first with records present
for namespace_name in dist.like_namespace_names:
record_count = (
db.query(Vulnerability)
.filter(Vulnerability.namespace_name == namespace_name)
.count()
)
if record_count > 0:
namespace_name_to_use = namespace_name
break
fix_candidates, vulnerable_candidates = self.candidates_for_package(
namespace_name_to_use
)
for candidate in fix_candidates:
# De-dup evaluations based on the underlying vulnerability_id. For packages where src has many binary builds, once we have a match we have a match.
if candidate.vulnerability_id not in [
x.vulnerability_id for x in matches
] and candidate.match_but_not_fixed(self):
matches.append(candidate)
for candidate in vulnerable_candidates:
if candidate.vulnerability_id not in [
x.vulnerability_id for x in matches
] and candidate.match_and_vulnerable(self):
matches.append(candidate)
# log.debug("TIMER DB: {}".format(time.time() - ts))
return matches
def candidates_for_package(self, distro_namespace=None):
"""
Return all vulnerabilities for the named package with the specified distro. Will apply to any version
of the package. If version is used, will filter to only those for the specified version.
        :param distro_namespace: the namespace name to match against (typically computed from this package's distro)
:return: List of Vulnerabilities
"""
package_obj = self
db = get_thread_scoped_session()
if not distro_namespace:
namespace_name = DistroNamespace.for_obj(package_obj).namespace_name
else:
namespace_name = distro_namespace
# Match the namespace and package name or src pkg name
fix_candidates = (
db.query(FixedArtifact)
.filter(
FixedArtifact.namespace_name == namespace_name,
or_(
FixedArtifact.name == package_obj.name,
FixedArtifact.name == package_obj.normalized_src_pkg,
),
)
.all()
)
# Match the namespace and package name or src pkg name
vulnerable_candidates = (
db.query(VulnerableArtifact)
.filter(
VulnerableArtifact.namespace_name == namespace_name,
or_(
VulnerableArtifact.name == package_obj.name,
VulnerableArtifact.name == package_obj.normalized_src_pkg,
),
)
.all()
)
return fix_candidates, vulnerable_candidates
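# Illustrative sketch, not used by the application: parsing the captured pom.properties blob back
# into a dict and reading the derived distro namespace. The package values are hypothetical, and
# the vulnerability-matching methods above are not exercised here because they need a live session.
def _example_image_package_metadata():
    pkg = ImagePackage(
        name="example-lib", version="1.2.3", pkg_type="java",
        distro_name="debian", distro_version="10",
        metadata_json={"pom.properties": "groupId=org.example\nartifactId=example-lib\nversion=1.2.3\n"},
    )
    props = pkg.get_pom_properties()  # {'groupId': 'org.example', 'artifactId': 'example-lib', 'version': '1.2.3'}
    return props, pkg.distro_namespace  # (..., 'debian:10')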
class ImagePackageManifestEntry(Base):
"""
An entry from the package manifest (e.g. rpm, deb, apk) for verifying package contents in a generic way.
"""
__tablename__ = "image_package_db_entries"
# Package key
image_id = Column(String, primary_key=True)
image_user_id = Column(String, primary_key=True)
pkg_name = Column(String, primary_key=True)
pkg_version = Column(String, primary_key=True)
pkg_type = Column(String, primary_key=True) # RHEL, DEB, APK, etc.
pkg_arch = Column(String, default="N/A", primary_key=True)
pkg_path = Column(String, default="pkgdb", primary_key=True)
# File path
file_path = Column(String, primary_key=True)
is_config_file = Column(Boolean, nullable=True)
digest = Column(String) # Will include a prefix: sha256, sha1, md5 etc.
digest_algorithm = Column(String, nullable=True)
file_group_name = Column(String, nullable=True)
file_user_name = Column(String, nullable=True)
mode = Column(Integer, nullable=True) # Mode as an integer in decimal, not octal
size = Column(Integer, nullable=True)
__table_args__ = (
ForeignKeyConstraint(
columns=(
image_id,
image_user_id,
pkg_name,
pkg_version,
pkg_type,
pkg_arch,
pkg_path,
),
refcolumns=(
"image_packages.image_id",
"image_packages.image_user_id",
"image_packages.name",
"image_packages.version",
"image_packages.pkg_type",
"image_packages.arch",
"image_packages.pkg_path",
),
),
{},
)
NPM_SEQ = Sequence("image_npms_seq_id_seq", metadata=Base.metadata)
class ImageNpm(Base):
"""
NOTE: This is a deprecated class used for legacy support and upgrade. Image NPMs are now stored in the ImagePackage type
"""
__tablename__ = "image_npms"
image_user_id = Column(String, primary_key=True)
image_id = Column(String, primary_key=True)
path_hash = Column(String, primary_key=True) # The sha256 hash of the path in hex
path = Column(String)
name = Column(String)
origins_json = Column(StringJSON)
source_pkg = Column(String)
licenses_json = Column(StringJSON)
versions_json = Column(StringJSON)
latest = Column(String)
seq_id = Column(
Integer, NPM_SEQ, server_default=NPM_SEQ.next_value()
) # Note this is not autoincrement as the upgrade code in upgrade.py sets. This table is no longer used as of 0.3.1 and is here for upgrade continuity only.
image = relationship("Image", back_populates="npms")
__table_args__ = (
ForeignKeyConstraint(
columns=(image_id, image_user_id),
refcolumns=("images.id", "images.user_id"),
),
Index("idx_npm_seq", seq_id),
{},
)
def __repr__(self):
return "<{} user_id={}, img_id={}, name={}>".format(
self.__class__, self.image_user_id, self.image_id, self.name
)
GEM_SEQ = Sequence("image_gems_seq_id_seq", metadata=Base.metadata)
class ImageGem(Base):
"""
NOTE: This is a deprecated class used for legacy support. Gems are now loaded as types of packages for the ImagePackage class
"""
__tablename__ = "image_gems"
image_user_id = Column(String, primary_key=True)
image_id = Column(String, primary_key=True)
path_hash = Column(String, primary_key=True) # The sha256 hash of the path in hex
path = Column(String)
name = Column(String)
files_json = Column(StringJSON)
origins_json = Column(StringJSON)
source_pkg = Column(String)
licenses_json = Column(StringJSON)
versions_json = Column(StringJSON)
latest = Column(String)
seq_id = Column(
Integer, GEM_SEQ, server_default=GEM_SEQ.next_value()
) # This table is no longer used as of 0.3.1 and is here for upgrade continuity only.
image = relationship("Image", back_populates="gems")
__table_args__ = (
ForeignKeyConstraint(
columns=(image_id, image_user_id),
refcolumns=("images.id", "images.user_id"),
),
Index("idx_gem_seq", seq_id),
{},
)
def __repr__(self):
return "<{} user_id={}, img_id={}, name={}>".format(
self.__class__, self.image_user_id, self.image_id, self.name
)
class ImageCpe(Base):
__tablename__ = "image_cpes"
image_user_id = Column(String, primary_key=True)
image_id = Column(String, primary_key=True)
pkg_type = Column(String, primary_key=True) # java, python, gem, npm, etc
pkg_path = Column(String, primary_key=True)
cpetype = Column(String, primary_key=True)
vendor = Column(String, primary_key=True)
name = Column(String, primary_key=True)
version = Column(String, primary_key=True)
update = Column(String, primary_key=True)
meta = Column(String, primary_key=True)
image = relationship("Image", back_populates="cpes")
__table_args__ = (
ForeignKeyConstraint(
columns=(image_id, image_user_id),
refcolumns=("images.id", "images.user_id"),
),
Index("ix_image_cpe_user_img", image_id, image_user_id),
{},
)
def __repr__(self):
return "<{} user_id={}, img_id={}, name={}>".format(
self.__class__, self.image_user_id, self.image_id, self.name
)
def fixed_in(self):
return None
def get_cpestring(self):
ret = None
try:
final_cpe = ["cpe", "-", "-", "-", "-", "-", "-"]
final_cpe[1] = self.cpetype
final_cpe[2] = self.vendor
final_cpe[3] = self.name
final_cpe[4] = self.version
final_cpe[5] = self.update
final_cpe[6] = self.meta
ret = ":".join(final_cpe)
except:
ret = None
return ret
def get_cpe23string(self):
ret = None
try:
final_cpe = [
"cpe",
"2.3",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
final_cpe[2] = self.cpetype[1]
final_cpe[3] = self.vendor
final_cpe[4] = self.name
final_cpe[5] = self.version
final_cpe[6] = self.update
# final_cpe[7] = self.edition
# final_cpe[8] = self.language
# final_cpe[9] = self.sw_edition
# final_cpe[10] = self.target_sw
# final_cpe[11] = self.target_hw
final_cpe[12] = self.meta
ret = ":".join(final_cpe)
except:
ret = None
return ret
class FilesystemAnalysis(Base):
"""
A unified and compressed record of the filesystem-level entries in an image. An alternative to the FilesystemItem approach,
this allows much faster index operations due to a smaller index, but no queries into the content of the filesystems themselves.
"""
__tablename__ = "image_fs_analysis_dump"
compression_level = 6
supported_algorithms = ["gzip"]
image_id = Column(String, primary_key=True)
image_user_id = Column(String, primary_key=True)
compressed_content_hash = Column(String)
compressed_file_json = Column(LargeBinary, nullable=False)
total_entry_count = Column(Integer, default=0)
file_count = Column(Integer, default=0)
directory_count = Column(Integer, default=0)
non_packaged_count = Column(Integer, default=0)
suid_count = Column(Integer, default=0)
image = relationship("Image", back_populates="fs")
compression_algorithm = Column(String, default="gzip")
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
)
_files = None
__table_args__ = (
ForeignKeyConstraint(
columns=(image_id, image_user_id),
refcolumns=("images.id", "images.user_id"),
),
{},
)
# NOTE: operations on the content of the dict itself will not trigger dirty updates and a flush to db,
# must explicitly set the value to a new dict if writes need to be persisted.
@property
def files(self):
if not self._files:
self._files = self._files_json()
return self._files
@files.setter
def files(self, value):
self._files = value
self._files_from_json(self._files)
def _files_json(self):
if self.compression_algorithm == "gzip":
return json.loads(
ensure_str(zlib.decompress(ensure_bytes(self.compressed_file_json)))
)
else:
raise ValueError(
"Got unexpected compresssion algorithm value: {}. Expected {}".format(
self.compression_algorithm, self.supported_algorithms
)
)
def _files_from_json(self, file_json):
"""
Compress and hash the file_json content
:param file_json:
:return:
"""
self.compressed_file_json = zlib.compress(json.dumps(file_json).encode("utf-8"))
self.compression_algorithm = "gzip"
self.compressed_content_hash = hashlib.sha256(
self.compressed_file_json
).hexdigest()
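# Illustrative sketch, not used by the application: the files dict round-trips through
# zlib/gzip-compressed JSON, and per the note above a *new* dict must be assigned to the files
# property for changes to be persisted. The single file entry below is hypothetical.
def _example_filesystem_analysis_roundtrip():
    fs = FilesystemAnalysis()
    fs.files = {"/etc/passwd": {"fullpath": "/etc/passwd", "is_packaged": True}}
    # the setter compressed the JSON, recorded the algorithm and hashed the compressed bytes
    assert fs.compression_algorithm == "gzip"
    assert fs.files["/etc/passwd"]["is_packaged"] is True
    return fs.compressed_content_hash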
class AnalysisArtifact(Base):
"""
A generic container for an analysis result that doesn't require significant structure.
Basically wraps a key-value output from a specific analyzer.
"""
__tablename__ = "image_analysis_artifacts"
image_id = Column(String, primary_key=True)
image_user_id = Column(String, primary_key=True)
analyzer_id = Column(
String, primary_key=True
) # The name of the analyzer (e.g. layer_info)
analyzer_artifact = Column(
String, primary_key=True
) # The analyzer artifact name (e.g. layers_to_dockerfile)
analyzer_type = Column(
String, primary_key=True
) # The analyzer type (e.g. base, user, or extra)
artifact_key = Column(String, primary_key=True)
str_value = Column(Text)
json_value = Column(StringJSON)
binary_value = Column(LargeBinary)
created_at = Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
last_modified = Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
image = relationship("Image", back_populates="analysis_artifacts")
__table_args__ = (
ForeignKeyConstraint(
columns=(image_id, image_user_id),
refcolumns=("images.id", "images.user_id"),
),
{},
)
class Image(Base):
"""
The core image analysis record. Contains metadata about the image itself.
"""
__tablename__ = "images"
id = Column(String, primary_key=True)
user_id = Column(
String, primary_key=True
) # Images are namespaced in the system to prevent overlap
state = Column(
Enum("failed", "initializing", "analyzing", "analyzed", name="image_states"),
default="initializing",
) # For now we only load analyzed images, no progress tracking
anchore_type = Column(
Enum(
"undefined",
"base",
"application",
"user",
"intermediate",
name="anchore_image_types",
),
default="undefined",
) # TODO: verify if base or undefined should be default
size = Column(BigInteger)
created_at = Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
last_modified = Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
digest = Column(String)
distro_name = Column(String)
distro_version = Column(String)
like_distro = Column(String)
# Should be native JSON, can be handled
layers_json = Column(StringJSON)
docker_history_json = Column(StringJSON)
docker_data_json = Column(StringJSON)
familytree_json = Column(StringJSON)
layer_info_json = Column(StringJSON)
dockerfile_contents = Column(Text)
dockerfile_mode = Column(String, default="Guessed")
packages = relationship(
"ImagePackage",
back_populates="image",
lazy="dynamic",
cascade=["all", "delete", "delete-orphan"],
)
fs = relationship(
"FilesystemAnalysis",
uselist=False,
lazy="select",
cascade=["all", "delete", "delete-orphan"],
)
# TODO - move these to ImagePackage records instead of individual tables
gems = relationship(
"ImageGem",
back_populates="image",
lazy="dynamic",
cascade=["all", "delete", "delete-orphan"],
)
npms = relationship(
"ImageNpm",
back_populates="image",
lazy="dynamic",
cascade=["all", "delete", "delete-orphan"],
)
cpes = relationship(
"ImageCpe",
back_populates="image",
lazy="dynamic",
cascade=["all", "delete", "delete-orphan"],
)
analysis_artifacts = relationship(
"AnalysisArtifact",
back_populates="image",
lazy="dynamic",
cascade=["all", "delete", "delete-orphan"],
)
@property
def distro_namespace(self):
if self.distro_name and self.distro_version:
return self.distro_name + ":" + self.distro_version
else:
return None
def get_packages_by_type(self, pkg_type):
db = get_thread_scoped_session()
typed_packages = (
db.query(ImagePackage)
.filter(
ImagePackage.image_id == self.id,
ImagePackage.image_user_id == self.user_id,
ImagePackage.pkg_type == pkg_type,
)
.all()
)
return typed_packages
def vulnerabilities(self):
"""
        Load vulnerabilities for all packages in this image
:return: list of ImagePackageVulnerabilities
"""
db = get_thread_scoped_session()
known_vulnerabilities = (
db.query(ImagePackageVulnerability)
.filter(
ImagePackageVulnerability.pkg_user_id == self.user_id,
ImagePackageVulnerability.pkg_image_id == self.id,
)
.all()
)
return known_vulnerabilities
def cpe_vulnerabilities(self, _nvd_cls, _cpe_cls):
"""
Similar to the vulnerabilities function, but using the cpe matches instead, basically the NVD raw data source
:return: list of (image_cpe, cpe_vulnerability) tuples
"""
db = get_thread_scoped_session()
if not _nvd_cls or not _cpe_cls:
_nvd_cls, _cpe_cls = select_nvd_classes(db)
cpe_vulnerabilities = (
db.query(ImageCpe, _cpe_cls)
.filter(
ImageCpe.image_id == self.id,
ImageCpe.image_user_id == self.user_id,
func.lower(ImageCpe.name) == _cpe_cls.name,
ImageCpe.version == _cpe_cls.version,
)
.options(joinedload(_cpe_cls.parent, innerjoin=True))
.all()
)
# vulndb is similar to nvd cpes, add them here
cpe_vulnerabilities.extend(
db.query(ImageCpe, VulnDBCpe)
.filter(
ImageCpe.image_id == self.id,
ImageCpe.image_user_id == self.user_id,
func.lower(ImageCpe.name) == VulnDBCpe.name,
ImageCpe.version == VulnDBCpe.version,
VulnDBCpe.is_affected.is_(True),
)
.options(joinedload(VulnDBCpe.parent, innerjoin=True))
.all()
)
return cpe_vulnerabilities
def get_image_base(self):
"""
Get the image that is this image's base image if it exists. Indicated by first entry of the familytree
:return: Image object
"""
        base_id = self.familytree_json[0] if self.familytree_json else self.id
if base_id == self.id:
return self
else:
db = get_thread_scoped_session()
return db.query(Image).get((base_id, self.user_id))
def __repr__(self):
return "<Image user_id={}, id={}, distro={}, distro_version={}, created_at={}, last_modified={}>".format(
self.user_id,
self.id,
self.distro_name,
self.distro_version,
self.created_at,
self.last_modified,
)
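# Illustrative sketch, not used by the application: distro_namespace is just '<name>:<version>'
# when both fields are present. The image below is hypothetical; the vulnerability and CPE
# queries on this class are not shown because they require a live database session.
def _example_image_distro_namespace():
    img = Image(
        id="sha256-example", user_id="admin",
        distro_name="ubuntu", distro_version="20.04", like_distro="debian",
    )
    return img.distro_namespace  # 'ubuntu:20.04'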
class ImagePackageVulnerability(Base):
"""
Provides a mapping between ImagePackage and Vulnerabilities
"""
__tablename__ = "image_package_vulnerabilities"
pkg_user_id = Column(String, primary_key=True)
pkg_image_id = Column(String, primary_key=True)
pkg_name = Column(String, primary_key=True)
pkg_version = Column(String, primary_key=True)
pkg_type = Column(String, primary_key=True) # RHEL, DEB, APK, etc.
pkg_arch = Column(String, default="N/A", primary_key=True)
pkg_path = Column(String, default="pkgdb", primary_key=True)
vulnerability_id = Column(String, primary_key=True)
vulnerability_namespace_name = Column(String)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
package = relationship("ImagePackage", back_populates="vulnerabilities")
vulnerability = relationship("Vulnerability")
__table_args__ = (
ForeignKeyConstraint(
columns=(
pkg_image_id,
pkg_user_id,
pkg_name,
pkg_version,
pkg_type,
pkg_arch,
pkg_path,
),
refcolumns=(
ImagePackage.image_id,
ImagePackage.image_user_id,
ImagePackage.name,
ImagePackage.version,
ImagePackage.pkg_type,
ImagePackage.arch,
ImagePackage.pkg_path,
),
),
ForeignKeyConstraint(
columns=(vulnerability_id, vulnerability_namespace_name),
refcolumns=(Vulnerability.id, Vulnerability.namespace_name),
),
{},
)
def fix_candidates(self) -> list:
"""
Return the list of FixedArtifact record given a package has been matched to the vulnerability by package/src-package
name. Does not perform a version check. Will return empty list if no matches
:return: the name-matched FixedArtifact record list
"""
if self.vulnerability.fixed_in:
name_matches = [self.pkg_name, self.package.normalized_src_pkg]
return [x for x in self.vulnerability.fixed_in if x.name in name_matches]
return []
def fixed_artifact(self):
"""
Return the FixedArtifact record given a package has been matched to the vulnerability
:return: the matched FixedArtifact record or None if not found
"""
candidates = self.fix_candidates()
candidate_count = len(candidates) if candidates else 0
if candidate_count == 0:
return None
elif candidate_count == 1:
return candidates[0]
fixed_in = None
# candidate_count >= 1
# Do version checks. This package must be affected by the range but not fixed.
matched = [x for x in candidates if x.match_but_not_fixed(self.package)]
if len(matched) == 1:
fixed_in = matched[0]
elif len(matched) > 1:
matched.sort(key=lambda x: x.updated_at, reverse=True)
fixed_in = matched[0]
# This shouldn't happen since it means there isn't consistency in the data
return fixed_in
def fixed_in(self):
"""
        Return the fixed_in version string for this package/vulnerability match (the vulnerability may list multiple affected packages).
:return: the fixed in version string if any or None if not found
"""
fixed_in = self.fixed_artifact()
fix_available_in = (
fixed_in.version if fixed_in and fixed_in.version != "None" else None
)
# NOTE: semver version format indicates a range where package
        # is vulnerable (as opposed to a value where anything < value
# is vulnerable, and the fix itself is known to exist), so we prepend a 'not' to indicate 'fix is available, if not in semver range'
if fixed_in and fixed_in.version_format in ["semver"]:
# Github Advisories can add the real version where there is a fix if any.
metadata = fixed_in.fix_metadata or {}
first_patched_version = metadata.get("first_patched_version")
if first_patched_version:
return first_patched_version
if (
fix_available_in
and fixed_in.fix_metadata
and fixed_in.fix_metadata.get("fix_exists", False)
):
fix_available_in = "! {}".format(fix_available_in)
else:
fix_available_in = None
return fix_available_in
def fix_has_no_advisory(self):
"""
For a given package vulnerability match, if the issue won't be addressed by the vendor return True.
Return False otherwise
:return:
"""
fixed_in = self.fixed_artifact()
        return bool(fixed_in and fixed_in.vendor_no_advisory)
@classmethod
def from_pair(cls, package_obj, vuln_obj):
rec = ImagePackageVulnerability()
rec.pkg_name = package_obj.name
rec.pkg_type = package_obj.pkg_type
rec.pkg_arch = package_obj.arch
rec.pkg_image_id = package_obj.image_id
rec.pkg_user_id = package_obj.image_user_id
rec.pkg_version = package_obj.version
rec.pkg_path = package_obj.pkg_path
rec.vulnerability_id = (
vuln_obj.vulnerability_id
if hasattr(vuln_obj, "vulnerability_id")
else vuln_obj.id
)
rec.vulnerability_namespace_name = vuln_obj.namespace_name
return rec
def __repr__(self):
return "<ImagePackageVulnerability img_user_id={}, img_id={}, pkg_name={}, pkg_version={}, vuln_id={}, vuln_namespace={}, pkg_path={}>".format(
self.pkg_user_id,
self.pkg_image_id,
self.pkg_name,
self.pkg_version,
self.vulnerability_id,
self.vulnerability_namespace_name,
self.pkg_path,
)
# To support hash functions like set operations, ensure these align with primary key comparisons to ensure two identical records would match as such.
def __eq__(self, other):
return isinstance(other, type(self)) and (
self.pkg_user_id,
self.pkg_image_id,
self.pkg_name,
self.pkg_version,
self.pkg_type,
self.pkg_arch,
self.vulnerability_id,
self.pkg_path,
) == (
other.pkg_user_id,
other.pkg_image_id,
other.pkg_name,
other.pkg_version,
other.pkg_type,
other.pkg_arch,
other.vulnerability_id,
other.pkg_path,
)
def __hash__(self):
return hash(
(
self.pkg_user_id,
self.pkg_image_id,
self.pkg_name,
self.pkg_version,
self.pkg_type,
self.pkg_arch,
self.vulnerability_id,
self.pkg_path,
)
)
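# Illustrative sketch, not used by the application: from_pair copies the composite package key and
# the vulnerability id, and the __eq__/__hash__ pair above lets duplicate matches collapse in a set.
# The package and vulnerability rows are hypothetical in-memory objects built with the plain
# declarative constructors; nothing is written to the database.
def _example_image_package_vulnerability_dedup():
    pkg = ImagePackage(
        image_id="sha256-example", image_user_id="admin",
        name="openssl", version="1.1.1f", pkg_type="dpkg", arch="amd64", pkg_path="pkgdb",
    )
    vuln = Vulnerability(id="CVE-2021-0001", namespace_name="ubuntu:20.04")
    match_a = ImagePackageVulnerability.from_pair(pkg, vuln)
    match_b = ImagePackageVulnerability.from_pair(pkg, vuln)
    return len({match_a, match_b})  # 1 -- identical key tuples deduplicate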
class IDistroMapper(object):
"""
Interface for a distro mapper object
"""
def __init__(self, distro, version, like_distro, found_mapping):
self.from_distro = distro
self.from_version = version
self.from_like_distro = like_distro
self.found_mapping = found_mapping
self.mapping = self._do_mapping()
def _do_mapping(self):
"""
Map from the given values to a new distro if an explicit mapping exists or else None
:param distro_name:
:param distro_version:
:param like_distro:
:return: list of tuples: [(distro, version, flavor), ... ,(distro, versionN, flavorN)]
"""
pass
def _map_name(self, distro_name, distro_version, like_distro, found_mapping=None):
pass
def _map_version(
self, distro_name, distro_version, like_distro, found_mapping=None
):
pass
def _map_flavor(self, distro_name, distro_version, like_distro, found_mapping=None):
pass
class VersionPreservingDistroMapper(IDistroMapper):
def _do_mapping(self):
"""
Map from the given values to a new distro if an explicit mapping exists or else None
:param distro_name:
:param distro_version:
:param like_distro:
:return: list of tuples: [(distro, version, flavor), ... ,(distro, versionN, flavorN)]
"""
distro = None
versions = None
flavor = None
try:
distro = self._map_name(
self.from_distro,
self.from_version,
self.from_like_distro,
self.found_mapping,
)
flavor = self._map_flavor(
self.from_distro,
self.from_version,
self.from_like_distro,
self.found_mapping,
)
versions = self._map_version(
self.from_distro,
self.from_version,
self.from_like_distro,
self.found_mapping,
)
return [
DistroTuple(distro=distro, version=v, flavor=flavor) for v in versions
]
except:
log.exception(
"Failed to fully construct the mapped distro from: {}, {}, {}".format(
self.from_distro, self.from_version, self.from_like_distro
)
)
raise
def _map_name(self, distro_name, distro_version, like_distro, found_mapping=None):
if found_mapping:
return found_mapping.to_distro
else:
return distro_name
def _map_flavor(self, distro_name, distro_version, like_distro, found_mapping=None):
if found_mapping:
return found_mapping.flavor
else:
db = get_thread_scoped_session()
candidates = like_distro.split(",") if like_distro else []
for c in candidates:
mapping = db.query(DistroMapping).get(c)
if mapping:
return mapping.flavor
return None
def _map_version(
self, distro_name, distro_version, like_distro, found_mapping=None
):
"""
        Maps version into a list of versions ordered by closeness of match: [full original version, major.minor, major]
        The shortened forms are only provided when the version matches a dot-delimited digit sequence
:param distro_name:
:param distro_version:
:param like_distro:
:param found_mapping:
:return:
"""
        # If the version has a subminor, also provide major.minor and major as fallbacks
patt = re.match(r"(\d+)\.(\d+)\.(\d+)", distro_version)
if patt:
major, minor, subminor = patt.groups()
return [distro_version, "{}.{}".format(major, minor), "{}".format(major)]
        # Otherwise parse down to major.minor and major
patt = re.match(r"(\d+)\.(\d+)", distro_version)
if patt:
major, minor = patt.groups()
return [distro_version, "{}.{}".format(major, minor), "{}".format(major)]
return [distro_version]
class DistroMapping(Base):
"""
A mapping entry between a distro with known cve feed and other similar distros.
Used to explicitly map similar distros to a base feed for cve matches.
"""
__tablename__ = "distro_mappings"
__distro_mapper_cls__ = VersionPreservingDistroMapper
from_distro = Column(String, primary_key=True) # The distro to be checked
to_distro = Column(
String
) # The distro to use instead of the pk distro to do cve checks
flavor = Column(String) # The distro flavor to use (e.g. RHEL, or DEB)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
@classmethod
def distros_for(cls, distro, version, like_distro=""):
"""
        Returns the list of distro mappings for the given distro name, version and like_distro values.
        The first element is the most exact match and later entries are progressively less exact,
        using the to_distro value of the found mapping when an explicit mapping exists.
        :param distro: distro name to be checked
        :param version: distro version string
        :param like_distro: comma-delimited list of similar distros, may be empty
:return: list of DistroTuples for most-to-least exact match
"""
db = get_thread_scoped_session()
found = db.query(DistroMapping).get(distro)
mapper = cls.__distro_mapper_cls__(distro, version, like_distro, found)
return mapper.mapping
@classmethod
def distros_mapped_to(cls, distro, version):
"""
Reverse of distros_for, returns the list of namespace names that would map to the given distro and version.
:param distro:
:param version:
:return:
"""
db = get_thread_scoped_session()
name_matches = (
db.query(DistroMapping).filter(DistroMapping.to_distro == distro).all()
)
return [
DistroTuple(
distro=mapping.from_distro, version=version, flavor=mapping.flavor
)
for mapping in name_matches
]
def __str__(self):
return "<DistroMapping>from={} to={}, flavor={}".format(
self.from_distro, self.to_distro, self.flavor
)
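# Illustrative sketch, not used by the application: when an explicit DistroMapping row is supplied
# the mapper never queries the database and simply expands the version into progressively coarser
# candidates. The mapping row and distro values below are hypothetical.
def _example_version_preserving_mapping():
    mapping_row = DistroMapping(from_distro="centos", to_distro="centos", flavor="RHEL")
    mapper = VersionPreservingDistroMapper("centos", "7.9.2009", "rhel,fedora", mapping_row)
    # Three DistroTuple entries: versions '7.9.2009', '7.9' and '7', all with flavor 'RHEL'
    return mapper.mapping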
class DistroNamespace(object):
"""
A helper object for holding and converting distro names and namespaces between image and vulnerability records.
Abstracts the conversion of name, version, like_version sets into usable strings for comparing and matching records.
The 'like' relation defines similar pkg types and cve feed sources. If distro A is like distro B, then distro A should be able to match
against distro B's vulnerability feeds.
"""
@classmethod
def for_obj(cls, obj):
if hasattr(obj, "distro_name") and hasattr(obj, "distro_version"):
return DistroNamespace(
getattr(obj, "distro_name"),
getattr(obj, "distro_version"),
like_distro=getattr(obj, "like_distro", None),
)
else:
raise TypeError(
"Given object must have attributes: distro_name, distro_version"
)
def __init__(self, name="UNKNOWN", version="UNKNOWN", like_distro=None):
self.name = name
self.version = version
self.like_distro = like_distro
self.mapping = DistroMapping.distros_for(
self.name, self.version, self.like_distro
)
self.flavor = self.mapping[0].flavor if self.mapping else "Unknown"
self.namespace_name = DistroNamespace.as_namespace_name(
self.mapping[0].distro, self.mapping[0].version
)
self.like_namespace_names = [
DistroNamespace.as_namespace_name(x.distro, x.version) for x in self.mapping
]
@staticmethod
def as_namespace_name(name, version):
"""
Direct conversion to a single namespace name. Does not follow any 'like' relations.
:return:
"""
return name + ":" + version
def mapped_names(self):
"""
        Return the list of namespaces that can map to this one. Only returns distro names whose DistroMapping relation is the exact
name of this object's name field. Ensures that only direct mappings are returned, and avoids intermediate names being mapped
as related to each other when they simply share a parent.
E.g.
ol like centos,
fedora like centos,
fedora not like ol
ol not like fedora
:return: list of name, version pairs
"""
return [
x.distro for x in DistroMapping.distros_mapped_to(self.name, self.version)
]
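# Illustrative sketch, not used by the application: the static helper simply joins name and
# version. Constructing a full DistroNamespace resolves 'like' relations through DistroMapping
# rows in the database, so that path is not exercised here.
def _example_namespace_name():
    return DistroNamespace.as_namespace_name("centos", "7")  # 'centos:7'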
class CachedPolicyEvaluation(Base):
__tablename__ = "policy_engine_evaluation_cache"
user_id = Column(String, primary_key=True)
image_id = Column(String, primary_key=True)
eval_tag = Column(String, primary_key=True)
bundle_id = Column(
String, primary_key=True
) # Need both id and digest to differentiate a new bundle vs update to bundle that requires a flush of the old record
bundle_digest = Column(String, primary_key=True)
result = Column(
StringJSON, nullable=False
) # Result struct, based on the 'type' inside, may be literal content or a reference to the archive
created_at = Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
last_modified = Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
def key_tuple(self):
return (
self.user_id,
self.image_id,
self.eval_tag,
self.bundle_id,
self.bundle_digest,
)
def _constuct_raw_result(self, result_json):
return {"type": "direct", "result": result_json}
def _construct_remote_result(self, bucket, key, digest):
"""
Build the result json for the db record
:param bucket: bucket in archive to lookup result
:param key: key in archive to lookup result
:param digest: sha256 digest of the result
:return:
"""
return {
"type": "archive",
"digest": digest,
"uri": "catalog://{bucket}/{key}".format(bucket=bucket, key=key),
}
def add_raw_result(self, result_json):
self.result = self._constuct_raw_result(result_json)
def add_remote_result(self, bucket, key, result_digest):
self.result = self._construct_remote_result(bucket, key, result_digest)
def is_raw(self):
return self.result["type"] == "direct"
def is_archive_ref(self):
return self.result["type"] == "archive"
def archive_tuple(self):
"""
Returns the bucket, key tuple for an archive reference
:return:
"""
if self.is_archive_ref():
uri = self.result.get("uri", "")
_, path = uri.split("catalog://", 1)
bucket, key = path.split("/", 1)
return bucket, key
else:
raise ValueError("Result type is not an archive")
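# Illustrative sketch, not used by the application: a remote (archived) evaluation stores a
# catalog:// URI that archive_tuple() splits back into bucket and key. The bucket, key and digest
# values are hypothetical.
def _example_cached_evaluation_archive_ref():
    cached = CachedPolicyEvaluation(
        user_id="admin", image_id="sha256-example", eval_tag="docker.io/example:latest"
    )
    cached.add_remote_result("policy-evaluations", "admin/sha256-example", "sha256:abc123")
    assert cached.is_archive_ref() and not cached.is_raw()
    return cached.archive_tuple()  # ('policy-evaluations', 'admin/sha256-example')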
def select_nvd_classes(db=None):
if not db:
db = get_thread_scoped_session()
_nvd_cls = NvdMetadata
_cpe_cls = CpeVulnerability
try:
fmd = db.query(FeedMetadata).filter(FeedMetadata.name == "nvdv2").first()
if fmd and fmd.last_full_sync:
_nvd_cls = NvdV2Metadata
_cpe_cls = CpeV2Vulnerability
except Exception as err:
log.warn("could not query for nvdv2 sync: {}".format(err))
log.debug("selected {}/{} nvd classes".format(_nvd_cls, _cpe_cls))
return _nvd_cls, _cpe_cls
def _get_one_or_none(key, val, collection):
"""
Find a match for the object in the collection using the key-value pair,
return the result only if 1 match is found.
Example instance of collection
[
{
"source": "CVE ID",
"url": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=2000-0089"
},
{
"source": "Bugtraq ID",
"url": "http://www.securityfocus.com/bid/947"
},
]
"""
if not key or not val:
return None
result = None
for entry in collection:
if entry.get(key) == val:
if result:
return None
else:
result = entry
return result
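# Illustrative sketch, not used by the application: _get_one_or_none only returns an entry when
# exactly one element carries the requested key/value pair. The reference list mirrors the
# docstring example above.
def _example_get_one_or_none():
    refs = [
        {"source": "CVE ID", "url": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=2000-0089"},
        {"source": "Bugtraq ID", "url": "http://www.securityfocus.com/bid/947"},
    ]
    single = _get_one_or_none("source", "CVE ID", refs)  # the first entry
    missing = _get_one_or_none("source", "Secunia ID", refs)  # None, nothing matches
    return single, missing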
|
py | b40a68de38098c076e01243b8703e1da9e39a695 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
---
This file is part of pygalle.core.env
Copyright (c) 2018 SAS 9 Février.
Distributed under the MIT License (license terms are at http://opensource.org/licenses/MIT).
---
"""
import unittest
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'src'))
def test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('test', pattern='test_*.py')
print(test_suite)
return test_suite
|
py | b40a691159176d07f642cace98d4884e516b2dd4 | import pandas as pd
import numpy as np
def read_omni_data(fname='data/omni_5min_features.lst'):
columns = ['Year', 'DOY', 'Hour', 'Minute', 'By', 'Bz', 'Vx', 'Vy', 'Vz', 'Np', 'PDYN', 'SYMH']
data = pd.read_csv(fname, delim_whitespace=True, header=None, names=columns)
return data
def read_tsyg_data(fname='data/2004_OMNI_5m_with_TA15_drivers.dat'):
columns = ['Year', 'DOY', 'Hour', 'Minute', 'Bx', 'By', 'Bz', 'Vx', 'Vy', 'Vz', 'Np', 'T', 'SYMH', 'IMFFlag', 'SWFlag', 'Tilt', 'PDYN', 'N', 'B']
data = pd.read_csv(fname, delim_whitespace=True, header=None, names=columns)
data = data.drop(columns=['Bx', 'T', 'IMFFlag', 'SWFlag', 'Tilt'])
neworder = ['Year', 'DOY', 'Hour', 'Minute', 'By', 'Bz', 'Vx', 'Vy', 'Vz', 'Np', 'PDYN', 'SYMH', 'N', 'B']
data = data[neworder]
return data
def compute_N(Vx, Vy, Vz, By, Bz):
V = np.sqrt(Vx**2 + Vy**2 + Vz**2)
Bt = np.sqrt(By**2 + Bz**2)
th = np.arctan2(By, Bz)
N = 0.86*(V/400)**(4/3) * (Bt/5)**(2/3) * (np.sin(th/2) ** 8) ** (1/3)
return np.round(N.to_numpy()[0],3)
def compute_B(Vx, Vy, Vz, By, Bz, Np):
V = np.sqrt(Vx**2 + Vy**2 + Vz**2)
Bt = np.sqrt(By**2 + Bz**2)
th = np.arctan2(By, Bz)
B = (Np/5)**0.5 * (V / 400)**2.5 * Bt / 5 * np.sin(th/2) ** 6
return np.round(B.to_numpy()[0],3)
def prepare_dataset(data):
data['PDYN'] = round((data.Np / 1000000) * (data.Vx**2 + data.Vy**2 + data.Vz**2) * 2, 2)
data['N'] = compute_N(data.Vx, data.Vy, data.Vz, data.By, data.Bz)
data['B'] = compute_B(data.Vx, data.Vy, data.Vz, data.By, data.Bz, data.Np)
return data
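# Illustrative sketch: the coupling helpers expect pandas Series (they call .to_numpy()), so a
# one-row DataFrame with hypothetical solar-wind values is enough to exercise them. The numbers
# below are made up and only demonstrate the call signature.
def example_single_record_indices():
    sw = pd.DataFrame({'Vx': [-400.0], 'Vy': [0.0], 'Vz': [0.0],
                       'By': [2.0], 'Bz': [-5.0], 'Np': [5.0]})
    n_index = compute_N(sw.Vx, sw.Vy, sw.Vz, sw.By, sw.Bz)
    b_index = compute_B(sw.Vx, sw.Vy, sw.Vz, sw.By, sw.Bz, sw.Np)
    return n_index, b_index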
if __name__ == '__main__':
print(read_tsyg_data()) |
py | b40a696be18e9449d0592269c10edb77e109b7c5 | #!/usr/bin/env python3
import logging
import arinna.log as log
import sys
import queue
import arinna.config as config
import arinna.mqtt_client
import mppsolar
logger = logging.getLogger(__name__)
class InverterSerialAdapter:
def __init__(self, port, baudrate):
logger.info('Port: {}'.format(port))
logger.info('Baudrate: {}'.format(baudrate))
self.serial_adapter = mppsolar.mppUtils(port, baudrate)
def send_command(self, command):
logger.info('Sending command: {}'.format(command))
response = self.serial_adapter.getResponseDict(command)
return response
def on_message(_, command_queue, message):
try:
logger.info('Message received')
logger.info('Payload: {}'.format(message.payload))
logger.info('Topic: {}'.format(message.topic))
command_queue.put(message.payload.decode())
except Exception:
logger.exception('Unknown exception occurred in on_message')
class InverterMQTTSubscriber:
def __init__(self, command_queue, mqtt_client):
self.command_queue = command_queue
self.mqtt_client = mqtt_client
def subscribe_request(self):
self.mqtt_client.set_user_data(self.command_queue)
self.mqtt_client.set_on_message(on_message)
self.mqtt_client.subscribe('inverter/request')
class InverterMQTTPublisher:
def __init__(self, mqtt_client):
self.mqtt_client = mqtt_client
def publish_response(self, response):
for key, status in response.items():
logger.info('Sending response')
topic = 'inverter/response/' + key
value, unit = status
logger.info('Topic: {}'.format(topic))
logger.info('Payload: {}'.format(value))
self.mqtt_client.publish(topic, value)
def publish_request(self, request):
logger.info('Publishing message')
topic = 'inverter/request'
logger.info('Topic: {}'.format(topic))
logger.info('Payload: {}'.format(request))
self.mqtt_client.publish(topic, request)
logger.info('Message published')
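# Illustrative sketch: a stand-in client exposing only publish() is enough to show how
# InverterMQTTPublisher maps a response dict of (value, unit) pairs onto topics. The fake client
# and the battery_voltage reading are hypothetical; the real client comes from arinna.mqtt_client.
class _FakeMQTTClient:
    def __init__(self):
        self.published = []
    def publish(self, topic, payload):
        self.published.append((topic, payload))
def example_publish_response():
    fake_client = _FakeMQTTClient()
    publisher = InverterMQTTPublisher(fake_client)
    publisher.publish_response({'battery_voltage': ('48.0', 'V')})
    return fake_client.published  # [('inverter/response/battery_voltage', '48.0')]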
def main():
settings = config.load()
log.setup_logging()
serial_adapter = InverterSerialAdapter(settings.serial_port,
settings.baudrate)
command_queue = queue.Queue()
logger.info('Starting MQTT loop')
mqtt_client = arinna.mqtt_client.MQTTClient()
mqtt_client.connect()
mqtt_client.loop_start()
mqtt_subscriber = InverterMQTTSubscriber(command_queue,
mqtt_client)
mqtt_subscriber.subscribe_request()
mqtt_publisher = InverterMQTTPublisher(mqtt_client)
try:
logger.info('Starting listening loop')
while True:
logger.info('Waiting for command')
command = command_queue.get()
logger.info('Command received: {}'.format(command))
try:
response = serial_adapter.send_command(command)
except AttributeError:
logger.warning('Failed to parse response. Skipping.')
continue
if not response:
logger.warning('Response is empty!')
continue
logger.info('Response: {}'.format(response))
mqtt_publisher.publish_response(response)
except KeyboardInterrupt:
logger.info('Listening loop stopped by user')
except Exception:
logger.exception('Unknown exception occurred')
finally:
mqtt_client.loop_stop()
mqtt_client.disconnect()
logger.info('Listening loop stopped')
return 0
if __name__ == '__main__':
sys.exit(main())
|
py | b40a6b0d10da7236febfeaefc79493dcd3619398 | """
@created_at 2015-05-11
@author Exequiel Fuentes Lettura <[email protected]>
"""
import os
class FileUtils:
"""Define a simple class with useful methods for handling files"""
LOG_BASE_DIR_NAME = "log"
LOG_FILENAME = "project.log"
def __init__(self):
pass
def do_path(self, paths=[]):
if not paths:
raise Exception("There is no paths to join")
return os.path.join(*paths)
def log_path(self):
pathfile = os.path.dirname(os.path.abspath(__file__))
return os.path.join(pathfile, "..", "..", "..", self.LOG_BASE_DIR_NAME)
def log_file(self):
return os.path.join(self.log_path(), self.LOG_FILENAME)
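# Minimal usage sketch (paths are illustrative, not part of the project layout):
#   utils = FileUtils()
#   utils.do_path(["/tmp", "project", "out.txt"])  # -> "/tmp/project/out.txt"
#   utils.log_file()  # typically resolves to <project root>/log/project.log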
|
py | b40a6be0c11d1951a20a0dd4489b622a48a01ec6 | # backward compatibility for actors.experimental - remove when not needed
from __future__ import absolute_import, division, print_function, unicode_literals
from wowp.actors.experimental import Splitter, Chain
from wowp.schedulers import NaiveScheduler
from wowp.actors import FuncActor
from wowp.util import ConstructorWrapper
def test_splitter():
splitter = Splitter(multiplicity=2, inport_name="x")
assert len(splitter.outports) == 2
scheduler = NaiveScheduler()
for i in range(0, 10):
scheduler.put_value(splitter.inports.x, i)
scheduler.execute()
x1_all = list(splitter.outports["x_1"].pop_all())
x2_all = list(splitter.outports["x_2"].pop_all())
print("x1:", x1_all)
print("x2:", x2_all)
assert [0, 2, 4, 6, 8] == x1_all
assert [1, 3, 5, 7, 9] == x2_all
def double_me(x):
return x * 2
def test_chain():
func_generator = ConstructorWrapper(FuncActor, double_me)
chain = Chain("func_chain", [func_generator, func_generator])
wf = chain.get_workflow()
res = wf(inp=4)
assert res["out"].pop() == 16
res = wf(inp=2)
assert res["out"].pop() == 8
res = wf(inp="a")
assert res["out"].pop() == "aaaa"
|
py | b40a6d0e3abf0717bda33017ee26076483c3796a | from rdopkg.action import Action, Arg
ACTIONS = [
Action('clone',
help="custom clone action replacing internal one",
required_args=[
Arg('package', positional=True, metavar='PACKAGE',
help="custom package to clone"),
]),
]
|
py | b40a6db0d1df28ce2013e1b3dff718b4878dae87 | import logging
import random
from importlib import import_module
from typing import Sequence, Type
from algo_battle.domain.algorithmus import Algorithmus
def lese_zahl(text: str, default: int = None) -> int:
default_text = " ({})".format(default) if default else ""
eingabe = input("{}{}: ".format(text, default_text))
try:
return int(eingabe)
except ValueError:
return _nochmal_oder_default(
"Die Eingabe '{}' konnte nicht als Zahl gelesen.".format(eingabe),
default, lese_zahl, text, default
)
def lese_arena_groesse(text: str, default: (int, int) = None) -> (int, int):
default_text = " ({}x{})".format(default[0], default[1]) if default else ""
eingabe = input("{}{}: ".format(text, default_text)).strip()
teile = eingabe.split("x")
if len(teile) < 2:
return _nochmal_oder_default(
"Das Trennzeichen 'x' konnte nicht in der Eingabe '{}' gefunden werden.".format(eingabe),
default, lese_arena_groesse, text, default
)
try:
breite = int(teile[0])
except ValueError:
return _nochmal_oder_default(
"Die Breite '{}' konnte nicht als Zahl gelesen werden.".format(teile[0]),
default, lese_arena_groesse, text, default
)
try:
hoehe = int(teile[1])
except ValueError:
return _nochmal_oder_default(
"Die Hoehe '{}' konnte nicht als Zahl gelesen werden.".format(teile[1]),
default, lese_arena_groesse, text, default
)
return breite, hoehe
def _nochmal_oder_default(nachricht: str, default, lese_methode, *args, **kwargs):
if default:
return default
else:
print(nachricht)
return lese_methode(*args, **kwargs)
def lese_algorithmus(text: str, fallback_algorithmen: Sequence[Type[Algorithmus]] = None) -> Type[Algorithmus]:
default_text = " (zufälliger Algorithmus)" if fallback_algorithmen else ""
eingabe = input("{}{}: ".format(text, default_text)).strip()
error = None
try:
algorithmus_klasse = lade_algorithmus_klasse(eingabe)
except ValueError as e:
algorithmus_klasse = random.choice(fallback_algorithmen) if fallback_algorithmen else None
error = e
if algorithmus_klasse:
print("Der Algorithmus {} wurde ausgewählt".format(algorithmus_klasse.__name__))
return algorithmus_klasse
else:
print(str(error))
return lese_algorithmus(text, fallback_algorithmen)
def lade_algorithmus_klasse(algorithmus: str) -> Type[Algorithmus]:
modul_name, klasse_name = parse_algorithmus_pfad(algorithmus)
try:
modul = import_module(modul_name)
return getattr(modul, klasse_name)
except (ImportError, ValueError) as e:
raise ValueError("Das Modul '{}' konnte nicht gefunden werden".format(modul_name)) from e
except AttributeError as e:
raise ValueError("Die Klasse '{}' konnte nicht im Modul '{}' gefunden werden".format(klasse_name, modul_name)) from e
def parse_algorithmus_pfad(pfad: str) -> (str, str):
if not pfad:
raise ValueError("Es wurde kein Pfad übergeben")
trenn_index = pfad.rfind(".")
if trenn_index < 0:
raise ValueError("Der Pfad '{}' konnte nicht geparsed werden".format(pfad))
return pfad[:trenn_index] if trenn_index > 0 else ".", pfad[trenn_index + 1:]
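# Example: parse_algorithmus_pfad("my_package.my_module.MyAlgorithmus") returns
# ("my_package.my_module", "MyAlgorithmus"); a leading-dot name such as
# ".MyAlgorithmus" resolves the module part to "." (names are illustrative).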
def logger() -> logging.Logger:
return logging.getLogger("Util") |
py | b40a6dec5f2cc9de3688530772ef5fec2e5e9bff | from anndata import AnnData
from pathlib import Path
from ..util import _doc_params, _read_to_str
from scanpy.readwrite import read
from scanpy import settings
from textwrap import indent
import tempfile
from ..io import upgrade_schema, AirrCell, from_airr_cells
import urllib.request
import zipfile
import pandas as pd
import scanpy as sc
from datetime import datetime
from ..util import tqdm
from scanpy import logging
import os.path
import platform
HERE = Path(__file__).parent
@_doc_params(
processing_code=indent(_read_to_str(HERE / "_processing_scripts/wu2020.py"), " ")
)
def wu2020() -> AnnData:
"""\
Return the dataset from :cite:`Wu2020` as AnnData object.
200k cells, of which 100k have TCRs.
This is how the dataset was processed:
.. code-block:: python
{processing_code}
"""
url = "https://github.com/icbi-lab/scirpy/releases/download/d0.1.0/wu2020.h5ad"
filename = settings.datasetdir / "wu2020.h5ad"
adata = read(filename, backup_url=url)
upgrade_schema(adata)
return adata
@_doc_params(
processing_code=indent(
_read_to_str(HERE / "_processing_scripts/wu2020_3k.py"), " "
)
)
def wu2020_3k() -> AnnData:
"""\
Return the dataset from :cite:`Wu2020` as AnnData object, downsampled
to 3000 TCR-containing cells.
This is how the dataset was processed:
.. code-block:: python
{processing_code}
"""
url = "https://github.com/icbi-lab/scirpy/releases/download/d0.1.0/wu2020_3k.h5ad"
filename = settings.datasetdir / "wu2020_3k.h5ad"
adata = read(filename, backup_url=url)
upgrade_schema(adata)
return adata
@_doc_params(
processing_code=indent(
_read_to_str(HERE / "_processing_scripts/maynard2020.py"), " "
)
)
def maynard2020() -> AnnData:
"""\
Return the dataset from :cite:`Maynard2020` as AnnData object.
21k cells from NSCLC profiled with Smart-seq2, of which 3,500 have :term:`TCRs<TCR>`
and 1,500 have :term:`BCRs<BCR>`.
The raw FASTQ files have been obtained from `PRJNA591860 <https://www.ebi.ac.uk/ena/browser/view/PRJNA591860>`__
and processed using the nf-core `Smart-seq2 pipeline <https://github.com/nf-core/smartseq2/>`__.
The processed files have been imported and transformed into an :class:`anndata.AnnData`
object using the following script:
.. code-block:: python
{processing_code}
"""
url = "https://github.com/icbi-lab/scirpy/releases/download/d0.1.0/maynard2020.h5ad"
filename = settings.datasetdir / "maynard2020.h5ad"
adata = read(filename, backup_url=url)
upgrade_schema(adata)
return adata
def vdjdb(cached: bool = True, *, cache_path="data/vdjdb.h5ad") -> AnnData:
"""\
Download VDJdb and process it into an AnnData object.
`VDJdb <https://vdjdb.cdr3.net/>`_ :cite:`vdjdb` is a curated database of
T-cell receptor (TCR) sequences with known antigen specificities.
Parameters
----------
cached
If `True`, attempt to read from the `data` directory before downloading
Returns
-------
An anndata object containing all entries from VDJDB in `obs`.
Each entry is represented as if it was a cell, but without gene expression.
Metadata is stored in `adata.uns["DB"]`.
"""
if cached:
try:
return sc.read_h5ad(cache_path)
except OSError:
pass
# if run on macOS, there may be certificate issues. Hence we have to set the default context to unverified
if platform.system() == "Darwin":
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
logging.info("Downloading latest version of VDJDB")
with urllib.request.urlopen(
"https://raw.githubusercontent.com/antigenomics/vdjdb-db/master/latest-version.txt"
) as url:
latest_versions = url.read().decode().split()
url = latest_versions[0]
with tempfile.TemporaryDirectory() as d:
d = Path(d)
urllib.request.urlretrieve(url, d / "vdjdb.tar.gz")
with zipfile.ZipFile(d / "vdjdb.tar.gz") as zf:
zf.extractall(d)
df = pd.read_csv(d / "vdjdb_full.txt", sep="\t")
tcr_cells = []
for idx, row in tqdm(
df.iterrows(), total=df.shape[0], desc="Processing VDJDB entries"
):
cell = AirrCell(cell_id=idx)
if not pd.isnull(row["cdr3.alpha"]):
alpha_chain = AirrCell.empty_chain_dict()
alpha_chain.update(
{
"locus": "TRA",
"junction_aa": row["cdr3.alpha"],
"v_call": row["v.alpha"],
"j_call": row["j.alpha"],
"consensus_count": 0,
"productive": True,
}
)
cell.add_chain(alpha_chain)
if not pd.isnull(row["cdr3.beta"]):
beta_chain = AirrCell.empty_chain_dict()
beta_chain.update(
{
"locus": "TRB",
"junction_aa": row["cdr3.beta"],
"v_call": row["v.beta"],
"d_call": row["d.beta"],
"j_call": row["j.beta"],
"consensus_count": 0,
"productive": True,
}
)
cell.add_chain(beta_chain)
INCLUDE_CELL_METADATA_FIELDS = [
"species",
"mhc.a",
"mhc.b",
"mhc.class",
"antigen.epitope",
"antigen.gene",
"antigen.species",
"reference.id",
"method.identification",
"method.frequency",
"method.singlecell",
"method.sequencing",
"method.verification",
"meta.study.id",
"meta.cell.subset",
"meta.subject.cohort",
"meta.subject.id",
"meta.replica.id",
"meta.clone.id",
"meta.epitope.id",
"meta.tissue",
"meta.donor.MHC",
"meta.donor.MHC.method",
"meta.structure.id",
"vdjdb.score",
]
for f in INCLUDE_CELL_METADATA_FIELDS:
cell[f] = row[f]
tcr_cells.append(cell)
logging.info("Converting to AnnData object")
adata = from_airr_cells(tcr_cells)
adata.uns["DB"] = {"name": "VDJDB", "date_downloaded": datetime.now().isoformat()}
# store cache
os.makedirs(os.path.dirname(os.path.abspath(cache_path)), exist_ok=True)
adata.write_h5ad(cache_path)
return adata
|
py | b40a6e6d95f5c1bf70f74a30db03b052c7a9bf81 | #!/usr/bin/env python3
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "arvcoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'testdir', nargs='?', default='',
help=('temporary test directory to combine logs from. '
'Defaults to the most recent'))
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args = parser.parse_args()
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
testdir = args.testdir or find_latest_test_dir()
if not testdir:
print("No test directories found")
sys.exit(1)
if not args.testdir:
print("Opening latest test directory: {}".format(testdir), file=sys.stderr)
log_events = read_logs(testdir)
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
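    # LogEvent namedtuples compare field by field (timestamp first), so heapq.merge
    # interleaves the already time-ordered per-file streams into one sorted stream.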
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def find_latest_test_dir():
"""Returns the latest tmpfile test directory prefix."""
tmpdir = tempfile.gettempdir()
def join_tmp(basename):
return os.path.join(tmpdir, basename)
def is_valid_test_tmpdir(basename):
fullpath = join_tmp(basename)
return (
os.path.isdir(fullpath)
and basename.startswith(TMPDIR_PREFIX)
and os.access(fullpath, os.R_OK)
)
testdir_paths = [
join_tmp(name) for name in os.listdir(tmpdir) if is_valid_test_tmpdir(name)
]
return max(testdir_paths, key=os.path.getmtime) if testdir_paths else None
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
timestamp = time_match.group()
if time_match.group(1) is None:
# timestamp does not have microseconds. Add zeroes.
timestamp_micro = timestamp.replace("Z", ".000000Z")
line = line.replace(timestamp, timestamp_micro)
timestamp = timestamp_micro
event = line
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
# Add the line. Prefix with space equivalent to the source + timestamp so log lines are aligned
event += " " + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
lines = event.event.splitlines()
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, lines[0], colors["reset"]))
if len(lines) > 1:
for line in lines[1:]:
print("{0}{1}{2}".format(colors[event.source.rstrip()], line, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
|
py | b40a6eedb1aa1182a96a459cece5c4595f13c979 | print("hi")
print("Like bad Windows newlines?")
|
py | b40a6ef3ce0618aec18446eb6484ff29e8e91264 | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class Ipv6(Base):
"""IPv6 global and per-port settings
The Ipv6 class encapsulates a required ipv6 resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'ipv6'
_SDM_ATT_MAP = {
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableNaRouterBit': 'enableNaRouterBit',
'InitialRaCount': 'initialRaCount',
'MaxInitialRaInterval': 'maxInitialRaInterval',
'MaxRaInterval': 'maxRaInterval',
'Name': 'name',
'PermanentMacForGateway': 'permanentMacForGateway',
'RaRtrLifetime': 'raRtrLifetime',
'ReSendNsOnLinkUp': 'reSendNsOnLinkUp',
'RowNames': 'rowNames',
'SuppressNsForDuplicateGateway': 'suppressNsForDuplicateGateway',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(Ipv6, self).__init__(parent, list_op)
@property
def NsRate(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.ipv6autoconfiguration.nsrate.nsrate_2743e8b1b7c27242856a5d009e73521d.NsRate): An instance of the NsRate class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.ipv6autoconfiguration.nsrate.nsrate_2743e8b1b7c27242856a5d009e73521d import NsRate
if self._properties.get('NsRate', None) is not None:
return self._properties.get('NsRate')
else:
return NsRate(self)._select()
@property
def StartRate(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.ethernet.startrate.startrate_2bc83a4fb9730935e8259bdb40af2dc0.StartRate): An instance of the StartRate class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.ethernet.startrate.startrate_2bc83a4fb9730935e8259bdb40af2dc0 import StartRate
if self._properties.get('StartRate', None) is not None:
return self._properties.get('StartRate')
else:
return StartRate(self)._select()
@property
def StopRate(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.ethernet.stoprate.stoprate_4ea9a1b38960d2b21012777131469a04.StopRate): An instance of the StopRate class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.ethernet.stoprate.stoprate_4ea9a1b38960d2b21012777131469a04 import StopRate
if self._properties.get('StopRate', None) is not None:
return self._properties.get('StopRate')
else:
return StopRate(self)._select()
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableNaRouterBit(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, Router bit will be set in Neighbor Advertisement.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableNaRouterBit']))
@property
def InitialRaCount(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Initial Router Advertisement sent count. Values can range from 0 to 10.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InitialRaCount']))
@property
def MaxInitialRaInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Maximum Initial Router Advertisement interval. Values can range from 3 to 16.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxInitialRaInterval']))
@property
def MaxRaInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Maximum Periodic Router Advertisement interval. Values can range from 9 to 1800.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxRaInterval']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def PermanentMacForGateway(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): When enabled, adds permanent entries for Gateways with manual MAC.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PermanentMacForGateway']))
@property
def RaRtrLifetime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Router lifetime in Router Advertisement. Values can range from 0 to 9000.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RaRtrLifetime']))
@property
def ReSendNsOnLinkUp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Resends neighbor solicitation after link up.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReSendNsOnLinkUp']))
@property
def RowNames(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): Name of rows
"""
return self._get_attribute(self._SDM_ATT_MAP['RowNames'])
@property
def SuppressNsForDuplicateGateway(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Optimizes the gateway MAC discovery by sending a single NS request for each unique destination.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SuppressNsForDuplicateGateway']))
def update(self, Name=None):
# type: (str) -> Ipv6
"""Updates ipv6 resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def get_device_ids(self, PortNames=None, EnableNaRouterBit=None, InitialRaCount=None, MaxInitialRaInterval=None, MaxRaInterval=None, PermanentMacForGateway=None, RaRtrLifetime=None, ReSendNsOnLinkUp=None, SuppressNsForDuplicateGateway=None):
"""Base class infrastructure that gets a list of ipv6 device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- EnableNaRouterBit (str): optional regex of enableNaRouterBit
- InitialRaCount (str): optional regex of initialRaCount
- MaxInitialRaInterval (str): optional regex of maxInitialRaInterval
- MaxRaInterval (str): optional regex of maxRaInterval
- PermanentMacForGateway (str): optional regex of permanentMacForGateway
- RaRtrLifetime (str): optional regex of raRtrLifetime
- ReSendNsOnLinkUp (str): optional regex of reSendNsOnLinkUp
- SuppressNsForDuplicateGateway (str): optional regex of suppressNsForDuplicateGateway
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
|
py | b40a7043ff53d9c696a1ef4a0450a037ad688311 |
from nfce.parser import NFCeParser
from nfce.scrapper import NfeScrapper |
py | b40a70ac14f9c7869e0c0151896d72073c1adc2a | # project/app/main.py
import logging
from fastapi import FastAPI
from app.api import ping, summaries
from app.db import init_db
log = logging.getLogger("uvicorn")
def create_application() -> FastAPI:
application = FastAPI()
application.include_router(ping.router)
application.include_router(
summaries.router, prefix="/summaries", tags=["summaries"]
)
return application
app = create_application()
@app.on_event("startup")
async def startup_event():
log.info("Starting up...")
init_db(app)
@app.on_event("shutdown")
async def shutdown_event():
log.info("Shutting down...")
|
py | b40a70b7c97f4c7958d195091a6f17482b4d6d11 | from .system import System
from .cells import CellLine, behavior
from .world import World
from .action import Action
__all__ = ['System', 'CellLine', 'Action', 'World', 'behavior']
|
py | b40a70d0cfb4877ef38231cb89fd5b9797e0716e | # -*- coding: utf-8 -*-
"""
New Zealand regions.
Source: http://en.wikipedia.org/wiki/Regions_of_New_Zealand#List_of_regions
"""
from __future__ import unicode_literals
from localflavor.stub import _
#: A list of regions
REGION_CHOICES = (
('NZ-NTL', _('Northland')),
('NZ-AUK', _('Auckland')),
('NZ-WKO', _('Waikato')),
('NZ-BOP', _('Bay of Plenty')),
('NZ-GIS', _('Gisborne')),
('NZ-HKB', _('Hawke\'s Bay')),
('NZ-TKI', _('Taranaki')),
('NZ-MWT', _('Manawatu-Wanganui')),
('NZ-WGN', _('Wellington')),
('NZ-TAS', _('Tasman')),
('NZ-NSN', _('Nelson')),
('NZ-MBH', _('Marlborough')),
('NZ-WTC', _('West Coast')),
('NZ-CAN', _('Canterbury')),
('NZ-OTA', _('Otago')),
('NZ-STL', _('Southland')),
)
|
py | b40a70ee401f8b1b74ad8bf265735b974955ddda | import time
try:
from pynput.keyboard import *
except ImportError:
import os
os.system("pip3 install pynput")
from pynput.keyboard import *
key = Controller()
def spam(word, times):
for i in range(0, times):
key.type(word)
        key.press(Key.enter)
        key.release(Key.enter)
time.sleep(0.45)
time.sleep(3)
spam("hi", 1)
|
py | b40a7132a3e2181c69293850b94d07b679128179 | import math
import torch
import torch.nn.functional as F
import scipy.stats as stats
import numpy as np
from fairseq import utils, metrics
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("wic")
class WiCCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
parser.add_argument(
"--classification-head-name",
default="sentence_classification_head",
help="name of the ranking head to use",
)
def forward(self, model, sample, reduce=True):
hiddens, _ = model(
**sample["net_input"], features_only=True, return_all_hiddens=False
)
embeddings = []
# first token [CLS]
embeddings.append(hiddens[:, 0, :])
# other tokens
# net_input src_ranges range1/range2
# shape of [batch, range_len] padded with 0
for i in range(2):
# [batch, range_len, hidden]
index = (
sample["net_input"]["src_ranges"][f"range{i+1}"]
.unsqueeze(-1)
.expand([-1, -1, hiddens.size(-1)])
)
# [batch, range_len, hidden]
mask = index != 0
# [batch, range_len, hidden]
embedding = hiddens.gather(dim=1, index=index) * mask
# [batch, hidden]
embedding = embedding.sum(dim=1) / mask.sum(dim=1)
embeddings.append(embedding)
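        # concatenate [CLS] with the two mean-pooled span embeddings -> [batch, 3 * hidden]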
concat = torch.cat(embeddings, dim=1)
# RobertaClassificationHead expects [batch, len, hidden]
logits = model.classification_heads["sentence_classification_head"](
concat.unsqueeze(1)
)
targets = sample["target_labels"]
sample_size = targets.numel()
loss = F.cross_entropy(logits.view(-1, 2), targets.view(-1), reduction="sum")
tp = ((logits[:, 0] <= logits[:, 1]) & (targets == 1)).long().sum()
fp = ((logits[:, 0] <= logits[:, 1]) & (targets == 0)).long().sum()
fn = ((logits[:, 0] > logits[:, 1]) & (targets == 1)).long().sum()
tn = ((logits[:, 0] > logits[:, 1]) & (targets == 0)).long().sum()
# print(tp, fp, fn, tn, targets.size(0))
# print(logits, targets)
assert (tp + fp + tn + fn) == targets.size(0), 'invalid size'
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
_, preds = logits.max(dim=1)
logging_output.update(ncorrect=(preds == targets).sum().item())
logging_output.update(tp=utils.item(tp.data) if reduce else tp.data)
logging_output.update(fp=utils.item(fp.data) if reduce else fp.data)
logging_output.update(fn=utils.item(fn.data) if reduce else fn.data)
logging_output.update(tn=utils.item(tn.data) if reduce else tn.data)
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
metrics.log_scalar("accuracy", 100.0 * ncorrect / nsentences, nsentences, round=1)
tp_sum = float(sum(log.get('tp', 0) for log in logging_outputs))
fp_sum = float(sum(log.get('fp', 0) for log in logging_outputs))
fn_sum = float(sum(log.get('fn', 0) for log in logging_outputs))
tn_sum = float(sum(log.get('tn', 0) for log in logging_outputs))
if tp_sum + fp_sum + fn_sum + tn_sum > 0:
assert tp_sum + fp_sum + fn_sum + tn_sum == sample_size, 'invalid size when aggregating'
acc = (tp_sum + tn_sum) / sample_size
tmp = 2 * tp_sum + fp_sum + fn_sum
f1 = (2 * tp_sum) / tmp if tmp else 0
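            # Matthews correlation coefficient: (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))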
tmp = (tp_sum + fp_sum) * (tp_sum + fn_sum) * (tn_sum + fp_sum) * (tn_sum + fn_sum)
mcc = (tp_sum * tn_sum - fp_sum * fn_sum) / (tmp ** 0.5) if tmp else 0
metrics.log_scalar('sample_size', sample_size)
metrics.log_scalar('f1', f1)
metrics.log_scalar('mcc', mcc)
metrics.log_scalar('acc_f1', 0.5 * (acc + f1))
if len(logging_outputs) > 0 and 'x' in logging_outputs[0]:
x = np.concatenate([log.get('x', np.array([])) for log in logging_outputs])
y = np.concatenate([log.get('y', np.array([])) for log in logging_outputs])
pearson = stats.pearsonr(x, y)[0]
spearman = stats.spearmanr(x, y)[0]
metrics.log_scalar('pearson', pearson)
metrics.log_scalar('spearman', spearman)
metrics.log_scalar('pearson_spearman', 0.5 * (pearson + spearman))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return True
# @staticmethod
# def aggregate_logging_outputs(logging_outputs):
# """Aggregate logging outputs from data parallel training."""
# loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
# ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
# nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
# sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
# agg_output = {
# "loss": loss_sum / sample_size / math.log(2),
# "ntokens": ntokens,
# "nsentences": nsentences,
# "sample_size": sample_size,
# }
# if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
# ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
# agg_output.update(accuracy=ncorrect / nsentences)
# return agg_output
|
py | b40a72ed336d645eaa6be18b95e86e99cee87273 | from big_ol_pile_of_manim_imports import *
import displayer as disp
from hilbert.curves import \
TransformOverIncreasingOrders, FlowSnake, HilbertCurve, \
SnakeCurve
from constants import *
def get_grid():
return Grid(64, 64)
def get_freq_line():
return UnitInterval().shift(2*DOWN) ##Change?
def get_mathy_and_bubble():
mathy = Mathematician()
mathy.to_edge(DOWN).shift(4*LEFT)
bubble = SpeechBubble(initial_width = 8)
bubble.pin_to(mathy)
return mathy, bubble
class AboutSpaceFillingCurves(TransformOverIncreasingOrders):
@staticmethod
def args_to_string():
return ""
@staticmethod
def string_to_args(arg_str):
return ()
def construct(self):
self.bubble = ThoughtBubble().ingest_submobjects()
self.bubble.scale(1.5)
TransformOverIncreasingOrders.construct(self, FlowSnake, 7)
self.play(Transform(self.curve, self.bubble))
self.show_infinite_objects()
self.pose_question()
self.wait()
def show_infinite_objects(self):
sigma, summand, equals, result = TexMobject([
"\\sum_{n = 1}^{\\infty}",
"\\dfrac{1}{n^2}",
"=",
"\\dfrac{\pi^2}{6}"
]).split()
alt_summand = TexMobject("n").replace(summand)
alt_result = TexMobject("-\\dfrac{1}{12}").replace(result)
rationals, other_equals, naturals = TexMobject([
"|\\mathds{Q}|",
"=",
"|\\mathds{N}|"
]).scale(2).split()
infinity = TexMobject("\\infty").scale(2)
local_mobjects = list(filter(
lambda m : isinstance(m, Mobject),
list(locals().values()),
))
for mob in local_mobjects:
mob.sort_points(get_norm)
self.play(ShimmerIn(infinity))
self.wait()
self.play(
ShimmerIn(summand),
ShimmerIn(equals),
ShimmerIn(result),
DelayByOrder(Transform(infinity, sigma))
)
self.wait()
self.play(
Transform(summand, alt_summand),
Transform(result, alt_result),
)
self.wait()
self.remove(infinity)
self.play(*[
CounterclockwiseTransform(
Mobject(summand, equals, result, sigma),
Mobject(rationals, other_equals, naturals)
)
])
self.wait()
self.clear()
self.add(self.bubble)
def pose_question(self):
infinity, rightarrow, N = TexMobject([
"\\infty", "\\rightarrow", "N"
]).scale(2).split()
question_mark = TextMobject("?").scale(2)
self.add(question_mark)
self.wait()
self.play(*[
ShimmerIn(mob)
for mob in (infinity, rightarrow, N)
] + [
ApplyMethod(question_mark.next_to, rightarrow, UP),
])
self.wait()
class PostponePhilosophizing(Scene):
def construct(self):
abstract, arrow, concrete = TextMobject([
"Abstract", " $\\rightarrow$ ", "Concrete"
]).scale(2).split()
self.add(abstract, arrow, concrete)
self.wait()
self.play(*[
ApplyMethod(
word1.replace, word2,
path_func = path_along_arc(np.pi/2)
)
for word1, word2 in it.permutations([abstract, concrete])
])
self.wait()
class GrowHilbertWithName(Scene):
def construct(self):
curve = HilbertCurve(order = 1)
words = TextMobject("``Hilbert Curve''")
words.to_edge(UP, buff = 0.2)
self.play(
ShimmerIn(words),
Transform(curve, HilbertCurve(order = 2)),
run_time = 2
)
for n in range(3, 8):
self.play(
Transform(curve, HilbertCurve(order = n)),
run_time = 5. /n
)
class SectionOne(Scene):
def construct(self):
self.add(TextMobject("Section 1: Seeing with your ears"))
self.wait()
class WriteSomeSoftware(Scene):
pass #Done viea screen capture, written here for organization
class ImageToSound(Scene):
def construct(self):
string = Vibrate(color = BLUE_D, run_time = 5)
picture = ImageMobject("lion", invert = False)
picture.scale(0.8)
picture_copy = picture.copy()
picture.sort_points(get_norm)
string.mobject.sort_points(lambda p : -get_norm(p))
self.add(picture)
self.wait()
self.play(Transform(
picture, string.mobject,
run_time = 3,
rate_func = rush_into
))
self.remove(picture)
self.play(string)
for mob in picture_copy, string.mobject:
mob.sort_points(lambda p : get_norm(p)%1)
self.play(Transform(
string.mobject, picture_copy,
run_time = 5,
rate_func = rush_from
))
class LinksInDescription(Scene):
def construct(self):
text = TextMobject("""
See links in the description for more on
sight via sound.
""")
self.play(ShimmerIn(text))
self.play(ShowCreation(Arrow(text, 3*DOWN)))
self.wait(2)
class ImageDataIsTwoDimensional(Scene):
def construct(self):
image = ImageMobject("lion", invert = False)
image.scale(0.5)
image.shift(2*LEFT)
self.add(image)
for vect, num in zip([DOWN, RIGHT], [1, 2]):
brace = Brace(image, vect)
words_mob = TextMobject("Dimension %d"%num)
words_mob.next_to(image, vect, buff = 1)
self.play(
Transform(Point(brace.get_center()), brace),
ShimmerIn(words_mob),
run_time = 2
)
self.wait()
class SoundDataIsOneDimensional(Scene):
def construct(self):
overtones = 5
floor = 2*DOWN
main_string = Vibrate(color = BLUE_D)
component_strings = [
Vibrate(
num_periods = k+1,
overtones = 1,
color = color,
center = 2*DOWN + UP*k
)
for k, color in zip(
list(range(overtones)),
Color(BLUE_E).range_to(WHITE, overtones)
)
]
dots = [
Dot(
string.mobject.get_center(),
color = string.mobject.get_color()
)
for string in component_strings
]
freq_line = get_freq_line()
freq_line.shift(floor)
freq_line.sort_points(get_norm)
brace = Brace(freq_line, UP)
words = TextMobject("Range of frequency values")
words.next_to(brace, UP)
self.play(*[
TransformAnimations(
main_string.copy(),
string,
run_time = 5
)
for string in component_strings
])
self.clear()
self.play(*[
TransformAnimations(
string,
Animation(dot)
)
for string, dot in zip(component_strings, dots)
])
self.clear()
self.play(
ShowCreation(freq_line),
GrowFromCenter(brace),
ShimmerIn(words),
*[
Transform(
dot,
dot.copy().scale(2).rotate(-np.pi/2).shift(floor),
path_func = path_along_arc(np.pi/3)
)
for dot in dots
]
)
self.wait(0.5)
class GridOfPixels(Scene):
def construct(self):
low_res = ImageMobject("low_resolution_lion", invert = False)
high_res = ImageMobject("Lion", invert = False)
grid = get_grid().scale(0.8)
for mob in low_res, high_res:
mob.replace(grid, stretch = True)
side_brace = Brace(low_res, LEFT)
top_brace = Brace(low_res, UP)
top_words = TextMobject("256 Px", size = "\\normal")
side_words = top_words.copy().rotate(np.pi/2)
top_words.next_to(top_brace, UP)
side_words.next_to(side_brace, LEFT)
self.add(high_res)
self.wait()
self.play(DelayByOrder(Transform(high_res, low_res)))
self.wait()
self.play(
GrowFromCenter(top_brace),
GrowFromCenter(side_brace),
ShimmerIn(top_words),
ShimmerIn(side_words)
)
self.wait()
for mob in grid, high_res:
mob.sort_points(get_norm)
self.play(DelayByOrder(Transform(high_res, grid)))
self.wait()
class ShowFrequencySpace(Scene):
def construct(self):
freq_line = get_freq_line()
self.add(freq_line)
self.wait()
for tex, vect in zip(["20 Hz", "20{,}000 Hz"], [LEFT, RIGHT]):
tex_mob = TextMobject(tex)
tex_mob.to_edge(vect)
tex_mob.shift(UP)
arrow = Arrow(tex_mob, freq_line.get_edge_center(vect))
self.play(
ShimmerIn(tex_mob),
ShowCreation(arrow)
)
self.wait()
class AssociatePixelWithFrequency(Scene):
def construct(self):
big_grid_dim = 20.
small_grid_dim = 6.
big_grid = Grid(64, 64, height = big_grid_dim, width = big_grid_dim)
big_grid.to_corner(UP+RIGHT, buff = 2)
small_grid = big_grid.copy()
small_grid.scale(small_grid_dim/big_grid_dim)
small_grid.center()
pixel = MobjectFromRegion(
region_from_polygon_vertices(*0.2*np.array([
RIGHT+DOWN,
RIGHT+UP,
LEFT+UP,
LEFT+DOWN
]))
)
pixel.set_color(WHITE)
pixel_width = big_grid.width/big_grid.columns
pixel.set_width(pixel_width)
pixel.to_corner(UP+RIGHT, buff = 2)
pixel.shift(5*pixel_width*(2*LEFT+DOWN))
freq_line = get_freq_line()
dot = Dot()
dot.shift(freq_line.get_center() + 2*RIGHT)
string = Line(LEFT, RIGHT, color = GREEN)
arrow = Arrow(dot, string.get_center())
vibration_config = {
"overtones" : 1,
"spatial_period" : 2,
}
vibration, loud_vibration, quiet_vibration = [
Vibrate(string.copy(), amplitude = a, **vibration_config)
for a in [0.5, 1., 0.25]
]
self.add(small_grid)
self.wait()
self.play(
Transform(small_grid, big_grid)
)
self.play(FadeIn(pixel))
self.wait()
self.play(
FadeOut(small_grid),
ShowCreation(freq_line)
)
self.remove(small_grid)
self.play(
Transform(pixel, dot),
)
self.wait()
self.play(ShowCreation(arrow))
self.play(loud_vibration)
self.play(
TransformAnimations(loud_vibration, quiet_vibration),
ApplyMethod(dot.fade, 0.9)
)
self.clear()
self.add(freq_line, dot, arrow)
self.play(quiet_vibration)
class ListenToAllPixels(Scene):
def construct(self):
grid = get_grid()
grid.sort_points(get_norm)
freq_line = get_freq_line()
freq_line.sort_points(lambda p : p[0])
red, blue = Color(RED), Color(BLUE)
freq_line.set_color_by_gradient(red, blue)
colors = [
Color(rgb = interpolate(
np.array(red.rgb),
np.array(blue.rgb),
alpha
))
for alpha in np.arange(4)/3.
]
string = Line(3*LEFT, 3*RIGHT, color = colors[1])
vibration = Vibrate(string)
vibration_copy = vibration.copy()
vibration_copy.mobject.stroke_width = 1
sub_vibrations = [
Vibrate(
string.copy().shift((n-1)*UP).set_color(colors[n]),
overtones = 1,
spatial_period = 6./(n+1),
temporal_period = 1./(n+1),
amplitude = 0.5/(n+1)
)
for n in range(4)
]
words = TexMobject("&\\vdots \\\\ \\text{thousands }& \\text{of frequencies} \\\\ &\\vdots")
words.to_edge(UP, buff = 0.1)
self.add(grid)
self.wait()
self.play(DelayByOrder(ApplyMethod(
grid.set_color_by_gradient, red, blue
)))
self.play(Transform(grid, freq_line))
self.wait()
self.play(
ShimmerIn(
words,
rate_func = squish_rate_func(smooth, 0, 0.2)
),
*sub_vibrations,
run_time = 5
)
self.play(
*[
TransformAnimations(
sub_vib, vibration
)
for sub_vib in sub_vibrations
]+[FadeOut(words)]
)
self.clear()
self.add(freq_line)
self.play(vibration)
class LayAsideSpeculation(Scene):
def construct(self):
words = TextMobject("Would this actually work?")
grid = get_grid()
grid.set_width(6)
grid.to_edge(LEFT)
freq_line = get_freq_line()
freq_line.set_width(6)
freq_line.center().to_edge(RIGHT)
mapping = Mobject(
grid, freq_line, Arrow(grid, freq_line)
)
mapping.ingest_submobjects()
lower_left = Point().to_corner(DOWN+LEFT, buff = 0)
lower_right = Point().to_corner(DOWN+RIGHT, buff = 0)
self.add(words)
self.wait()
self.play(
Transform(words, lower_right),
Transform(lower_left, mapping)
)
self.wait()
class RandomMapping(Scene):
def construct(self):
grid = get_grid()
grid.set_width(6)
grid.to_edge(LEFT)
freq_line = get_freq_line()
freq_line.set_width(6)
freq_line.center().to_edge(RIGHT)
# for mob in grid, freq_line:
# indices = np.arange(mob.get_num_points())
# random.shuffle(indices)
# mob.points = mob.points[indices]
stars = Stars(stroke_width = grid.stroke_width)
self.add(grid)
targets = [stars, freq_line]
alphas = [not_quite_there(rush_into), rush_from]
for target, rate_func in zip(targets, alphas):
self.play(Transform(
grid, target,
run_time = 3,
rate_func = rate_func,
path_func = path_along_arc(-np.pi/2)
))
self.wait()
class DataScrambledAnyway(Scene):
def construct(self):
self.add(TextMobject("Data is scrambled anyway, right?"))
self.wait()
class LeverageExistingIntuitions(Scene):
def construct(self):
self.add(TextMobject("Leverage existing intuitions"))
self.wait()
class ThinkInTermsOfReverseMapping(Scene):
def construct(self):
grid = get_grid()
grid.set_width(6)
grid.to_edge(LEFT)
freq_line = get_freq_line()
freq_line.set_width(6)
freq_line.center().to_edge(RIGHT)
arrow = Arrow(grid, freq_line)
color1, color2 = YELLOW_C, RED
square_length = 0.01
dot1 = Dot(color = color1)
dot1.shift(3*RIGHT)
dot2 = Dot(color = color2)
dot2.shift(3.1*RIGHT)
arrow1 = Arrow(2*RIGHT+UP, dot1, color = color1, buff = 0.1)
arrow2 = Arrow(4*RIGHT+UP, dot2, color = color2, buff = 0.1)
dot3, arrow3 = [
mob.copy().shift(5*LEFT+UP)
for mob in (dot1, arrow1)
]
dot4, arrow4 = [
mob.copy().shift(5*LEFT+0.9*UP)
for mob in (dot2, arrow2)
]
self.add(grid, freq_line, arrow)
self.wait()
self.play(ApplyMethod(
arrow.rotate, np.pi,
path_func = clockwise_path()
))
self.wait()
self.play(ShowCreation(arrow1))
self.add(dot1)
self.play(ShowCreation(arrow2))
self.add(dot2)
self.wait()
self.remove(arrow1, arrow2)
self.play(
Transform(dot1, dot3),
Transform(dot2, dot4)
)
self.play(
ApplyMethod(grid.fade, 0.8),
Animation(Mobject(dot3, dot4))
)
self.play(ShowCreation(arrow3))
self.play(ShowCreation(arrow4))
self.wait()
class WeaveLineThroughPixels(Scene):
@staticmethod
def args_to_string(order):
return str(order)
@staticmethod
def string_to_args(order_str):
return int(order_str)
def construct(self, order):
start_color, end_color = RED, GREEN
curve = HilbertCurve(order = order)
line = Line(5*LEFT, 5*RIGHT)
for mob in curve, line:
mob.set_color_by_gradient(start_color, end_color)
freq_line = get_freq_line()
freq_line.replace(line, stretch = True)
unit = 6./(2**order) #sidelength of pixel
up = unit*UP
right = unit*RIGHT
lower_left = 3*(LEFT+DOWN)
squares = Mobject(*[
Square(
side_length = unit,
color = WHITE
).shift(x*right+y*up)
for x, y in it.product(list(range(2**order)), list(range(2**order)))
])
squares.center()
targets = Mobject()
for square in squares.submobjects:
center = square.get_center()
distances = np.apply_along_axis(
lambda p : get_norm(p-center),
1,
curve.points
)
index_along_curve = np.argmin(distances)
fraction_along_curve = index_along_curve/float(curve.get_num_points())
target = square.copy().center().scale(0.8/(2**order))
line_index = int(fraction_along_curve*line.get_num_points())
target.shift(line.points[line_index])
targets.add(target)
self.add(squares)
self.play(ShowCreation(
curve,
run_time = 5,
rate_func = None
))
self.wait()
self.play(
Transform(curve, line),
Transform(squares, targets),
run_time = 3
)
self.wait()
self.play(ShowCreation(freq_line))
self.wait()
class WellPlayedGameOfSnake(Scene):
def construct(self):
grid = Grid(16, 16).fade()
snake_curve = SnakeCurve(order = 4)
words = TextMobject("``Snake Curve''")
words.next_to(grid, UP)
self.add(grid)
self.play(ShowCreation(
snake_curve,
run_time = 7,
rate_func = None
))
self.wait()
self.play(ShimmerIn(words))
self.wait()
class TellMathematicianFriend(Scene):
def construct(self):
mathy, bubble = get_mathy_and_bubble()
squiggle_mouth = mathy.mouth.copy()
squiggle_mouth.apply_function(
lambda x_y_z : (x_y_z[0], x_y_z[1]+0.02*np.sin(50*x_y_z[0]), x_y_z[2])
)
bubble.ingest_submobjects()
bubble.write("Why not use a Hilbert curve \\textinterrobang ")
words1 = bubble.content
bubble.write("So, it's not one curve but an infinite family of curves \\dots")
words2 = bubble.content
bubble.write("Well, no, it \\emph{is} just one thing, but I need \\\\ \
to tell you about a certain infinite family first.")
words3 = bubble.content
description = TextMobject("Mathematician friend", size = "\\small")
description.next_to(mathy, buff = 2)
arrow = Arrow(description, mathy)
self.add(mathy)
self.play(
ShowCreation(arrow),
ShimmerIn(description)
)
self.wait()
point = Point(bubble.get_tip())
self.play(
Transform(point, bubble),
)
self.remove(point)
self.add(bubble)
self.play(ShimmerIn(words1))
self.wait()
self.remove(description, arrow)
self.play(
Transform(mathy.mouth, squiggle_mouth),
ApplyMethod(mathy.arm.wag, 0.2*RIGHT, LEFT),
)
self.remove(words1)
self.add(words2)
self.wait(2)
self.remove(words2)
self.add(words3)
self.wait(2)
self.play(
ApplyPointwiseFunction(
lambda p : 15*p/get_norm(p),
bubble
),
ApplyMethod(mathy.shift, 5*(DOWN+LEFT)),
FadeOut(words3),
run_time = 3
)
class Order1PseudoHilbertCurve(Scene):
def construct(self):
words, s = TextMobject(["Pseudo-Hilbert Curve", "s"]).split()
pre_words = TextMobject("Order 1")
pre_words.next_to(words, LEFT, buff = 0.5)
s.next_to(words, RIGHT, buff = 0.05, aligned_edge = DOWN)
cluster = Mobject(pre_words, words, s)
cluster.center()
cluster.scale(0.7)
cluster.to_edge(UP, buff = 0.3)
cluster.set_color(GREEN)
grid1 = Grid(1, 1)
grid2 = Grid(2, 2)
curve = HilbertCurve(order = 1)
self.add(words, s)
self.wait()
self.play(Transform(
s, pre_words,
path_func = path_along_arc(-np.pi/3)
))
self.wait()
self.play(ShowCreation(grid1))
self.wait()
self.play(ShowCreation(grid2))
self.wait()
kwargs = {
"run_time" : 5,
"rate_func" : None
}
self.play(ShowCreation(curve, **kwargs))
self.wait()
class Order2PseudoHilbertCurve(Scene):
def construct(self):
words = TextMobject("Order 2 Pseudo-Hilbert Curve")
words.to_edge(UP, buff = 0.3)
words.set_color(GREEN)
grid2 = Grid(2, 2)
grid4 = Grid(4, 4, stroke_width = 2)
# order_1_curve = HilbertCurve(order = 1)
# squaggle_curve = order_1_curve.copy().apply_function(
# lambda (x, y, z) : (x + np.cos(3*y), y + np.sin(3*x), z)
# )
# squaggle_curve.show()
mini_curves = [
HilbertCurve(order = 1).scale(0.5).shift(1.5*vect)
for vect in [
LEFT+DOWN,
LEFT+UP,
RIGHT+UP,
RIGHT+DOWN
]
]
last_curve = mini_curves[0]
naive_curve = Mobject(last_curve)
for mini_curve in mini_curves[1:]:
line = Line(last_curve.points[-1], mini_curve.points[0])
naive_curve.add(line, mini_curve)
last_curve = mini_curve
naive_curve.ingest_submobjects()
naive_curve.set_color_by_gradient(RED, GREEN)
order_2_curve = HilbertCurve(order = 2)
self.add(words, grid2)
self.wait()
self.play(ShowCreation(grid4))
self.play(*[
ShowCreation(mini_curve)
for mini_curve in mini_curves
])
self.wait()
self.play(ShowCreation(naive_curve, run_time = 5))
self.remove(*mini_curves)
self.wait()
self.play(Transform(naive_curve, order_2_curve))
self.wait()
class Order3PseudoHilbertCurve(Scene):
def construct(self):
words = TextMobject("Order 3 Pseudo-Hilbert Curve")
words.set_color(GREEN)
words.to_edge(UP)
grid4 = Mobject(
Grid(2, 2),
Grid(4, 4, stroke_width = 2)
)
grid8 = Grid(8, 8, stroke_width = 1)
order_3_curve = HilbertCurve(order = 3)
mini_curves = [
HilbertCurve(order = 2).scale(0.5).shift(1.5*vect)
for vect in [
LEFT+DOWN,
LEFT+UP,
RIGHT+UP,
RIGHT+DOWN
]
]
self.add(words, grid4)
self.wait()
self.play(ShowCreation(grid8))
self.wait()
self.play(*list(map(GrowFromCenter, mini_curves)))
self.wait()
self.clear()
self.add(words, grid8, *mini_curves)
self.play(*[
ApplyMethod(curve.rotate_in_place, np.pi, axis)
for curve, axis in [
(mini_curves[0], UP+RIGHT),
(mini_curves[3], UP+LEFT)
]
])
self.play(ShowCreation(order_3_curve, run_time = 5))
self.wait()
class GrowToOrder8PseudoHilbertCurve(Scene):
def construct(self):
self.curve = HilbertCurve(order = 1)
self.add(self.curve)
self.wait()
while self.curve.order < 8:
self.increase_order()
def increase_order(self):
mini_curves = [
self.curve.copy().scale(0.5).shift(1.5*vect)
for vect in [
LEFT+DOWN,
LEFT+UP,
RIGHT+UP,
RIGHT+DOWN
]
]
self.remove(self.curve)
self.play(
Transform(self.curve.copy(), mini_curves[0])
)
self.play(*[
GrowFromCenter(mini_curve)
for mini_curve in mini_curves[1:]
])
self.wait()
self.clear()
self.add(*mini_curves)
self.play(*[
ApplyMethod(curve.rotate_in_place, np.pi, axis)
for curve, axis in [
(mini_curves[0], UP+RIGHT),
(mini_curves[3], UP+LEFT)
]
])
self.curve = HilbertCurve(order = self.curve.order+1)
self.play(ShowCreation(self.curve, run_time = 2))
self.remove(*mini_curves)
self.wait()
class UseOrder8(Scene):
def construct(self):
mathy, bubble = get_mathy_and_bubble()
bubble.write("For a 256x256 pixel array...")
words = TextMobject("Order 8 Pseudo-Hilbert Curve")
words.set_color(GREEN)
words.to_edge(UP, buff = 0.3)
curve = HilbertCurve(order = 8)
self.add(mathy, bubble)
self.play(ShimmerIn(bubble.content))
self.wait()
self.clear()
self.add(words)
self.play(ShowCreation(
curve, run_time = 7, rate_func = None
))
self.wait()
class HilbertBetterThanSnakeQ(Scene):
def construct(self):
hilbert_curves, snake_curves = [
[
CurveClass(order = n)
for n in range(2, 7)
]
for CurveClass in (HilbertCurve, SnakeCurve)
]
for curve in hilbert_curves+snake_curves:
curve.scale(0.8)
for curve in hilbert_curves:
curve.to_edge(LEFT)
for curve in snake_curves:
curve.to_edge(RIGHT)
greater_than = TexMobject(">")
question_mark = TextMobject("?")
question_mark.next_to(greater_than, UP)
self.add(greater_than, question_mark)
hilbert_curve = hilbert_curves[0]
snake_curve = snake_curves[0]
for new_hc, new_sc in zip(hilbert_curves[1:], snake_curves[1:]):
self.play(*[
Transform(hilbert_curve, new_hc),
Transform(snake_curve, new_sc)
])
self.wait()
class ImagineItWorks(Scene):
def construct(self):
self.add(TextMobject("Imagine your project succeeds..."))
self.wait()
class RandyWithHeadphones(Scene):
def construct(self):
headphones = ImageMobject("Headphones.png")
headphones.scale(0.1)
headphones.stretch(2, 0)
headphones.shift(1.2*UP+0.05*LEFT)
headphones.set_color(GREY)
randy = Randolph()
self.add(randy, headphones)
self.wait(2)
self.play(ApplyMethod(randy.blink))
self.wait(4)
class IncreaseResolution(Scene):
def construct(self):
grids = [
Grid(
2**order, 2**order,
stroke_width = 1
).shift(0.3*DOWN)
for order in (6, 7)
]
grid = grids[0]
side_brace = Brace(grid, LEFT)
top_brace = Brace(grid, UP)
top_words = TextMobject("256")
new_top_words = TextMobject("512")
side_words = top_words.copy()
new_side_words = new_top_words.copy()
for words in top_words, new_top_words:
words.next_to(top_brace, UP, buff = 0.1)
for words in side_words, new_side_words:
words.next_to(side_brace, LEFT)
self.add(grid)
self.play(
GrowFromCenter(side_brace),
GrowFromCenter(top_brace),
ShimmerIn(top_words),
ShimmerIn(side_words)
)
self.wait()
self.play(
DelayByOrder(Transform(*grids)),
Transform(top_words, new_top_words),
Transform(side_words, new_side_words)
)
self.wait()
class IncreasingResolutionWithSnakeCurve(Scene):
def construct(self):
start_curve = SnakeCurve(order = 6)
end_curve = SnakeCurve(order = 7)
start_dots, end_dots = [
Mobject(*[
Dot(
curve.points[int(x*curve.get_num_points())],
color = color
)
for x, color in [
(0.202, GREEN),
(0.48, BLUE),
(0.7, RED)
]
])
for curve in (start_curve, end_curve)
]
self.add(start_curve)
self.wait()
self.play(
ShowCreation(start_dots, run_time = 2),
ApplyMethod(start_curve.fade)
)
end_curve.fade()
self.play(
Transform(start_curve, end_curve),
Transform(start_dots, end_dots)
)
self.wait()
class TrackSpecificCurvePoint(Scene):
CURVE_CLASS = None #Fillin
def construct(self):
line = get_freq_line().center()
line.sort_points(lambda p : p[0])
curves = [
self.CURVE_CLASS(order = order)
for order in range(3, 10)
]
alpha = 0.48
dot = Dot(UP)
start_dot = Dot(0.1*LEFT)
dots = [
            Dot(curve.points[int(alpha * curve.get_num_points())])
for curve in curves
]
self.play(ShowCreation(line))
self.play(Transform(dot, start_dot))
self.wait()
for new_dot, curve in zip(dots, curves):
self.play(
Transform(line, curve),
Transform(dot, new_dot)
)
self.wait()
class TrackSpecificSnakeCurvePoint(TrackSpecificCurvePoint):
CURVE_CLASS = SnakeCurve
class NeedToRelearn(Scene):
def construct(self):
top_words = TextMobject("Different pixel-frequency association")
bottom_words = TextMobject("Need to relearn sight-via-sound")
top_words.shift(UP)
bottom_words.shift(DOWN)
arrow = Arrow(top_words, bottom_words)
self.play(ShimmerIn(top_words))
self.wait()
self.play(ShowCreation(arrow))
self.play(ShimmerIn(bottom_words))
self.wait()
class TrackSpecificHilbertCurvePoint(TrackSpecificCurvePoint):
CURVE_CLASS = HilbertCurve
|
py | b40a746273319e55873028eba88b29823b7b856e | """
Copyright VIP Group
Licensed under the Apache License, Version 2.0.
Modify from https://github.com/rwightman/pytorch-image-models
Original copyright of Ross Wightman below, modifications by VIP Group
Hacked together by / copyright Ross Wightman
"""
import jittor.nn as nn
import jittor.nn as F
from .conv_bn_act import ConvBnAct
from .create_conv2d import create_conv2d
from .norm_act import BatchNorm2d
class Involution(nn.Module):
def __init__(
self,
channels,
kernel_size=3,
stride=1,
group_size=16,
rd_ratio=4,
norm_layer=BatchNorm2d,
act_layer=nn.ReLU,
):
super(Involution, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.channels = channels
self.group_size = group_size
self.groups = self.channels // self.group_size
self.conv1 = ConvBnAct(
in_channels=channels,
out_channels=channels // rd_ratio,
kernel_size=1,
norm_layer=norm_layer,
act_layer=act_layer)
self.conv2 = self.conv = create_conv2d(
in_channels=channels // rd_ratio,
out_channels=kernel_size ** 2 * self.groups,
kernel_size=1,
stride=1)
self.avgpool = nn.AvgPool2d(stride, stride) if stride == 2 else nn.Identity()
# self.unfold = nn.Unfold(kernel_size, 1, (kernel_size-1)//2, stride)
def execute(self, x):
weight = self.conv2(self.conv1(self.avgpool(x)))
B, C, H, W = weight.shape
KK = int(self.kernel_size ** 2)
weight = weight.view(B, self.groups, KK, H, W).unsqueeze(2)
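        # weight: [B, groups, 1, K*K, H, W]; unfolded input below: [B, groups, group_size, K*K, H, W]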
out = F.unfold(x, self.kernel_size, 1, (self.kernel_size - 1) // 2, self.stride).view(B, self.groups,
self.group_size, KK, H, W)
out = (weight * out).sum(dim=3).view(B, self.channels, H, W)
return out
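# Rough usage sketch (illustrative shapes, assuming jittor's callable-module convention):
#   import jittor as jt
#   m = Involution(channels=64, kernel_size=3, stride=1)
#   y = m(jt.randn(2, 64, 32, 32))  # -> [2, 64, 32, 32]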
|
py | b40a75bd971647b320de8bb1f1a5fff06624564a | #!/env/bin/python
"""USEQ tools"""
import sys
import argparse
from genologics.lims import Lims
# import resources
import utilities
import epp
import daemons
from config import Config
#Commandline utility Functions
def manage_accounts(args):
"""Create,Edit,Retrieve accounts (labs)"""
utilities.useq_manage_accounts.run(lims, args.mode, args.csv, args.account)
def client_mail(args):
"""Send email to all specific USEQ clients, all clients belonging to an account or a single specific client."""
utilities.useq_client_mail.run(lims, config.MAIL_SENDER, args.content, args.mode, args.attachment, args.name)
def share_data(args):
"""Encrypt and Share one or more datasets"""
utilities.useq_share_run.run(lims, args.ids, args.username, args.dir, args.fid, args.link_portal)
def budget_overview(args):
utilities.useq_budget_overview.run(lims, args.budgetnrs, args.output_file)
def get_researchers(args):
utilities.useq_get_researchers.run(lims)
def manage_runids(args):
utilities.useq_manage_runids.run(lims, args.csv, args.mode)
def link_run_results(args):
utilities.useq_link_run_results.run(lims, args.runid)
def year_overview(args):
utilities.useq_year_overview.run(lims, args.year, args.output)
#Clarity epp scripts
def run_status_mail(args):
"""Send run started mail"""
epp.useq_run_status_mail.run(lims, config.MAIL_SENDER, config.MAIL_ADMINS, args.mode ,args.step_uri)
def modify_samplesheet(args):
"""Reverse complements the barcodes in a samplesheet"""
epp.useq_modify_samplesheet.run(lims, args.step, args.aid, args.output_file, args.mode)
def group_permissions(args):
"""Checks if a user trying to execute a LIMS step is part of the specified group(s)"""
epp.useq_group_permissions.run(lims,args.step, args.groups)
def finance_overview(args):
"""Creates a finance overview, used for billing, for all runs in the current step"""
epp.useq_finance_overview.run(lims, args.step, args.output_file)
def route_artifacts(args):
"""Route artifacts to the appropriate step in a workflow"""
epp.useq_route_artifacts.run(lims, args.step, args.input)
def close_projects(args):
"""Close all projects included in the current step"""
epp.useq_close_projects.run(lims, args.step, args.pid)
def create_recipe(args):
"""Create Novaseq run recipe"""
epp.useq_create_recipe.run(lims, args.step,args.output_file)
def create_samplesheet(args):
"""Create generic v2 samplesheet"""
epp.useq_create_samplesheet.run(lims, args.step,args.output_file)
#Daemon scripts
def nextcloud_monitor(args):
"""Is intended to run as a daemon to check the space remaining on the Nextcloud storage"""
daemons.useq_nextcloud_monitor.run()
def manage_runs(args):
"""Script responsible for starting conversion, transfer, cleanup and archiving of sequencing runs"""
daemons.useq_manage_runs.run(lims)
def run_overview(args):
"""Creates json file intended for the USEQ-Overview website"""
daemons.useq_run_overview.run(lims, args.overview_file)
if __name__ == "__main__":
global lims
# Setup lims connection
lims = Lims(Config.LIMS_URI, Config.LIMS_USER, Config.LIMS_PW)
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers()
#Utility parsers
parser_utilities = subparser.add_parser('utilities',help="Utility functions: manage_accounts, client_mail, share_data, budget_overview , manage_runids,link_run_results, get_researchers, year_overview")
subparser_utilities = parser_utilities.add_subparsers()
parser_manage_accounts = subparser_utilities.add_parser('manage_accounts', help='Create, Edit & Retrieve accounts (labs)')
parser_manage_accounts.add_argument('-m','--mode',choices=['create','edit','retrieve'])
parser_manage_accounts.add_argument('-c','--csv', help='Path to input or output csv file')
parser_manage_accounts.add_argument('-a','--account', help='Account name or ID. Leave empty for mode "create"', default=None)
parser_manage_accounts.set_defaults(func=manage_accounts)
parser_client_mail = subparser_utilities.add_parser('client_mail', help='Send email to all specific USEQ users, all clients belonging to an account or a single specific client.')
parser_client_mail.add_argument('-m','--mode',choices=['all','labs','accounts'])
parser_client_mail.add_argument('-c','--content', help='Path to content file (see resources for example)', nargs='?' ,type=argparse.FileType('r'))
parser_client_mail.add_argument('-n','--name', help='Lab or Account name(s) separated by comma. Leave empty for mode "all"')
parser_client_mail.add_argument('-a','--attachment', help='Path to attachment file')
parser_client_mail.set_defaults(func=client_mail)
    parser_share_data = subparser_utilities.add_parser('share_data', help='Shares raw data already on Nextcloud (by ID) or uploads data to nextcloud and then shares it (email and dir).')
parser_share_data.add_argument('-i', '--ids', help='One or more Project ID(s) to share, separated by comma.')
parser_share_data.add_argument('-u', '--username', help='Username to share data with.', default=None)
parser_share_data.add_argument('-d', '--dir', help='Directory containing data to share.', default=None)
parser_share_data.add_argument('-f', '--fid', help='Overrides the Flowcell ID found in the LIMS, ONLY use when the Flowcell ID in the LIMS is wrong (ROOOBIIIIN!!).', default=None)
parser_share_data.add_argument('-l', '--link_portal', help='Try and link run results to portal instance of run. Only works in combination with --ids.', default=None)
parser_share_data.set_defaults(func=share_data)
parser_budget_ovw = subparser_utilities.add_parser('budget_overview', help='Get an overview of all costs booked to supplied budget numbers.')
parser_budget_ovw.add_argument('-o','--output_file', nargs='?', type=argparse.FileType('w'), default=sys.stdout, help='Output file path (default=stdout)')
parser_budget_ovw.add_argument('-b', '--budgetnrs', required=True)
parser_budget_ovw.set_defaults(func=budget_overview)
parser_manage_runids = subparser_utilities.add_parser('manage_runids', help='Link or unlink one or multiple runIDs for a user.')
parser_manage_runids.add_argument('-c', '--csv', help='Path to csv file', nargs='?' ,type=argparse.FileType('r') ,required=True)
parser_manage_runids.add_argument('-m', '--mode', choices=['link','unlink'] ,required=True)
parser_manage_runids.set_defaults(func=manage_runids)
parser_link_run_results = subparser_utilities.add_parser('link_run_results', help='Link the run results for a runID.')
parser_link_run_results.add_argument('-i', '--runid', help='LIMS runID', default=None)
# parser_link_run_results.add_argument('-p', '--rundir', help='Path the run directory', required=True)
parser_link_run_results.set_defaults(func=link_run_results)
parser_get_researchers = subparser_utilities.add_parser('get_researchers', help='Get all info for all researchers')
parser_get_researchers.set_defaults(func=get_researchers)
parser_year_overview = subparser_utilities.add_parser('year_overview', help='Create an overview of all USEQ projects in a given year / all years.')
parser_year_overview.add_argument('-o', '--output', help='Path to output file', nargs='?' ,type=argparse.FileType('w') , default=sys.stdout)
parser_year_overview.add_argument('-y', '--year', help='Year, leave empty for all', default=None)
parser_year_overview.set_defaults(func=year_overview)
# year_overview
#epp parsers
parser_epp = subparser.add_parser('epp',help='Clarity epp functions: run_status_mail, modify_samplesheet, group_permissions, finance_overview, route_artifacts, close_projects ')
subparser_epp = parser_epp.add_subparsers()
parser_run_status_mail = subparser_epp.add_parser('run_status', help='Sends a status email about a run depending on the mode, mail type depends on mode')
parser_run_status_mail.add_argument('-m', '--mode' ,choices=['run_started','run_finished'])
parser_run_status_mail.add_argument('-s', '--step_uri', help="The URI of the step that launched this script. Needed for modes: 'run_status', 'run_finished'", default=None)
parser_run_status_mail.set_defaults(func=run_status_mail)
parser_modify_samplesheet = subparser_epp.add_parser('modify_samplesheet', help='This script is used to modify a samplesheet to work with either NextSeq or MiSeq/HiSeq. Currently all it does is reverse complement the barcodes when needed')
parser_modify_samplesheet.add_argument('-s', '--step', help='Step URI', required=True)
parser_modify_samplesheet.add_argument('-a', '--aid', help='Artifact ID', required=True)
parser_modify_samplesheet.add_argument('-m', '--mode', help='Run mode', choices=['rev', 'v1tov2'], required=True)
parser_modify_samplesheet.add_argument('-o','--output_file', nargs='?', type=argparse.FileType('w'), default=sys.stdout, help='Output file path (default=stdout)')
parser_modify_samplesheet.set_defaults(func=modify_samplesheet)
parser_group_permissions = subparser_epp.add_parser('group_permissions', help='Script that checks if a user trying to execute a LIMS step is part of the specified group(s)')
parser_group_permissions.add_argument('-s', '--step', help='Step URI', required=True)
parser_group_permissions.add_argument('-g', '--groups', help='Groups to give permission to', required=True)
parser_group_permissions.set_defaults(func=group_permissions)
parser_finance_overview = subparser_epp.add_parser('finance_overview', help='Creates a finance overview for all runs included in the step')
parser_finance_overview.add_argument('-s', '--step', help='Step URI', required=True)
parser_finance_overview.add_argument('-o','--output_file', nargs='?', type=argparse.FileType('w'), default=sys.stdout, help='Output file path (default=stdout)')
parser_finance_overview.set_defaults(func=finance_overview)
parser_route_artifacts = subparser_epp.add_parser('route_artifacts', help='Route artifacts to the next appropriate step in the workflow')
parser_route_artifacts.add_argument('-s', '--step', help='Step URI', required=True)
parser_route_artifacts.add_argument('-i', '--input', help='Use input artifact', default=False)
parser_route_artifacts.set_defaults(func=route_artifacts)
parser_close_projects = subparser_epp.add_parser('close_projects', help='Close all projects included in the specified step')
parser_close_projects.add_argument('-s', '--step', help='Step URI', required=False)
parser_close_projects.add_argument('-p', '--pid', required=False, default=None, help='ProjectID, Overrides Step URI')
parser_close_projects.set_defaults(func=close_projects)
parser_create_recipe = subparser_epp.add_parser('create_recipe', help='Creates a novaseq run recipe. Can only be started from the USEQ - Denature, Dilute and Load (Novaseq) step.')
parser_create_recipe.add_argument('-s', '--step', help='Step URI', required=True)
parser_create_recipe.add_argument('-o','--output_file', nargs='?', type=argparse.FileType('w'), default=sys.stdout, help='Output file path (default=stdout)')
parser_create_recipe.set_defaults(func=create_recipe)
parser_create_samplesheet = subparser_epp.add_parser('create_samplesheet', help='Creates a v2 samplesheet.')
parser_create_samplesheet.add_argument('-s', '--step', help='Step URI', required=True)
parser_create_samplesheet.add_argument('-o','--output_file', nargs='?', type=argparse.FileType('w'), default=sys.stdout, help='Output file path (default=stdout)')
parser_create_samplesheet.set_defaults(func=create_samplesheet)
#Daemon parsers
parser_daemons = subparser.add_parser('daemons', help='USEQ daemon scripts: check_nextcloud_storage,manage_runs ')
subparser_daemons = parser_daemons.add_subparsers()
parser_nextcloud_monitor = subparser_daemons.add_parser('nextcloud_monitor', help='Daemon that monitors the NextCloud storage and sends a mail when the threshold has been reached.')
parser_nextcloud_monitor.set_defaults(func=nextcloud_monitor)
parser_manage_runs = subparser_daemons.add_parser('manage_runs', help='Daemon responsible for starting conversion, transfer, cleanup and archiving of sequencing runs')
# parser_manage_runs.add_argument('-m', '--missing_bcl', help='Run conversion with --ignore-missing-bcls flag', default=False)
# parser_manage_runs.add_argument('-b', '--barcode_mismatches', help='Run conversion with n mismatches allowed in index', default=1)
# parser_manage_runs.add_argument('-f', '--fastq_for_index', help='Create FastQ for index reads', default=False)
# parser_manage_runs.add_argument('-s', '--short_reads', help='Sets --minimum-trimmed-read-length and --mask-short-adapter-reads to 0 allowing short reads to pass filter', default=False)
# parser_manage_runs.add_argument('-u', '--use_bases_mask', help='Use this base mask', default=None)
parser_manage_runs.set_defaults(func=manage_runs)
parser_run_overview = subparser_daemons.add_parser('run_overview', help='Daemon responsible for updating the run overview json file used in the USEQ-Overview website.')
parser_run_overview.add_argument('-o', '--overview_file', help='', default='overview.json')
parser_run_overview.set_defaults(func=run_overview)
args = parser.parse_args()
args.func(args)
#EPP Functions
|
py | b40a75f1c9ded8e1547e668ecce525045f293c0b | import qi
import argparse
import sys
import time
import threading
import action_base
from action_base import *
actionName = "reccam"
logkey = 'NAOqibag/EnableCamera'
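# The action framework passes a single string parameter to this action:
# 'off' disables camera recording (event value 0.0), any other value enables
# it (event value 0.5); see actionThread_exec below.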
def actionThread_exec (params):
t = threading.currentThread()
memory_service = getattr(t, "mem_serv", None)
print "Action "+actionName+" "+params+" started"
# action init
if (params=='off'):
memory_service.raiseEvent(logkey,0.0)
print " -- Recording data disabled --"
else:
memory_service.raiseEvent(logkey,0.5)
print " -- Recording data enabled --"
# action exec
time.sleep(1.0)
# action end
action_success(actionName,params)
def init(session):
global orderID
print actionName+" init"
action_base.init(session, actionName, actionThread_exec)
session.service("ALMemory").declareEvent('DialogueVequest')
def quit():
print actionName+" quit"
actionThread_exec.do_run = False
if __name__ == "__main__":
app = action_base.initApp(actionName)
init(app.session)
#Program stays at this point until we stop it
app.run()
quit()
|
py | b40a76b659747db6e07411b67d98db1587ec86dd | from pygments.style import Style
from pygments.token import (
Comment, Error, Keyword, Literal, Name, Number, Operator, String, Text
)
class BaseSixteenStyle(Style):
base00 = '#f5f7ff'
base01 = '#dfe2f1'
base02 = '#979db4'
base03 = '#898ea4'
base04 = '#6b7394'
base05 = '#5e6687'
base06 = '#293256'
base07 = '#202746'
base08 = '#c94922'
base09 = '#c76b29'
base0a = '#c08b30'
base0b = '#ac9739'
base0c = '#22a2c9'
base0d = '#3d8fd1'
base0e = '#6679cc'
base0f = '#9c637a'
default_style = ''
background_color = base00
highlight_color = base02
styles = {
Text: base05,
Error: base08, # .err
Comment: f'italic {base03}', # .c
Comment.Preproc: base0f, # .cp
Comment.PreprocFile: base0b, # .cpf
Keyword: base0e, # .k
Keyword.Type: base08, # .kt
Name.Attribute: base0d, # .na
Name.Builtin: base0d, # .nb
Name.Builtin.Pseudo: base08, # .bp
Name.Class: base0d, # .nc
Name.Constant: base09, # .no
Name.Decorator: base09, # .nd
Name.Function: base0d, # .nf
Name.Namespace: base0d, # .nn
Name.Tag: base0e, # .nt
Name.Variable: base0d, # .nv
Name.Variable.Instance: base08, # .vi
Number: base09, # .m
Operator: base0c, # .o
Operator.Word: base0e, # .ow
Literal: base0b, # .l
String: base0b, # .s
String.Interpol: base0f, # .si
String.Regex: base0c, # .sr
String.Symbol: base09, # .ss
}
from string import capwords # noqa: E402
BaseSixteenStyle.__name__ = 'BaseSixteen{}Style'.format(
capwords('atelier-sulphurpool-light', '-').replace('-', '')
)
globals()[BaseSixteenStyle.__name__] = globals()['BaseSixteenStyle']
del globals()['BaseSixteenStyle']
del capwords
|
py | b40a770d15cf11ce7ed20d14e877ed824c03eaf6 | """Contains data helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import codecs
import os
import tarfile
import time
try:
import queue
except ImportError:
import Queue as queue
from threading import Thread
from multiprocessing import Process, Manager, Value
from paddle.dataset.common import md5file
def read_manifest(manifest_path, max_duration=float('inf'), min_duration=0.0):
"""Load and parse manifest file.
Instances with durations outside [min_duration, max_duration] will be
filtered out.
:param manifest_path: Manifest file to load and parse.
:type manifest_path: basestring
:param max_duration: Maximal duration in seconds for instance filter.
:type max_duration: float
:param min_duration: Minimal duration in seconds for instance filter.
:type min_duration: float
:return: Manifest parsing results. List of dict.
:rtype: list
:raises IOError: If failed to parse the manifest.
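    Example manifest line (illustrative; only the "duration" field is used by
    this filter, the remaining fields depend on the dataset)::
        {"audio_filepath": "utt_001.wav", "duration": 3.52, "text": "hello"}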
"""
manifest = []
for json_line in codecs.open(manifest_path, 'r', 'utf-8'):
try:
json_data = json.loads(json_line)
except Exception as e:
raise IOError("Error reading manifest: %s" % str(e))
if (json_data["duration"] <= max_duration and
json_data["duration"] >= min_duration):
manifest.append(json_data)
return manifest
def getfile_insensitive(path):
"""Get the actual file path when given insensitive filename."""
directory, filename = os.path.split(path)
directory, filename = (directory or '.'), filename.lower()
for f in os.listdir(directory):
newpath = os.path.join(directory, f)
if os.path.isfile(newpath) and f.lower() == filename:
return newpath
def download_multi(url, target_dir, extra_args):
"""Download multiple files from url to target_dir."""
if not os.path.exists(target_dir): os.makedirs(target_dir)
print("Downloading %s ..." % url)
ret_code = os.system("wget -c " + url + ' ' + extra_args + " -P " +
target_dir)
return ret_code
def download(url, md5sum, target_dir):
"""Download file from url to target_dir, and check md5sum."""
if not os.path.exists(target_dir): os.makedirs(target_dir)
filepath = os.path.join(target_dir, url.split("/")[-1])
if not (os.path.exists(filepath) and md5file(filepath) == md5sum):
print("Downloading %s ..." % url)
os.system("wget -c " + url + " -P " + target_dir)
print("\nMD5 Chesksum %s ..." % filepath)
if not md5file(filepath) == md5sum:
raise RuntimeError("MD5 checksum failed.")
else:
print("File exists, skip downloading. (%s)" % filepath)
return filepath
def unpack(filepath, target_dir, rm_tar=False):
"""Unpack the file to the target_dir."""
print("Unpacking %s ..." % filepath)
tar = tarfile.open(filepath)
tar.extractall(target_dir)
tar.close()
    if rm_tar:
os.remove(filepath)
class XmapEndSignal():
pass
|
py | b40a7793ad884747f73c2d1d7a8595c73a9ce2ed | import base64
import pytest
@pytest.mark.models(
'backends/postgres/dtypes/binary',
)
def test_insert(model, app):
data = base64.b64encode(b'data').decode('ascii')
app.authmodel(model, ['insert'])
resp = app.post(f'/{model}', json={'blob': data})
assert resp.status_code == 201, resp.json()
assert resp.json()['blob'] == data
@pytest.mark.models(
'backends/postgres/dtypes/binary',
)
def test_upsert(model, app):
data = base64.b64encode(b'data').decode('ascii')
app.authorize(['spinta_set_meta_fields'])
app.authmodel(model, ['upsert'])
if ':dataset/' in model:
pk = '844b08602aeffbf0d12dbfd5f2e861c7501ed2cb'
else:
pk = '9ea9cf88-68f6-4753-b9e6-ce3d40ba1861'
resp = app.post(f'/{model}', json={
'_op': 'upsert',
'_where': f'_id="{pk}"',
'_id': pk,
'blob': data,
})
assert resp.status_code == 201, resp.json()
assert resp.json()['_id'] == pk
assert resp.json()['blob'] == data
resp = app.post(f'/{model}', json={
'_op': 'upsert',
'_where': f'_id="{pk}"',
'_id': pk,
'blob': data,
})
assert resp.status_code == 200, resp.json()
assert resp.json()['_id'] == pk
assert 'blob' not in resp.json()
@pytest.mark.models(
'datasets/dtypes/binary',
)
def test_getone(model, app):
data = base64.b64encode(b'data').decode('ascii')
app.authmodel(model, ['insert', 'getone'])
resp = app.post(f'/{model}', json={'blob': data})
assert resp.status_code == 201, resp.json()
assert resp.json()['blob'] == data
pk = resp.json()['_id']
resp = app.get(f'/{model}/{pk}')
assert resp.status_code == 200, resp.json()
assert resp.json()['blob'] == data
@pytest.mark.models(
'datasets/dtypes/binary',
)
def test_getall(model, app):
data = base64.b64encode(b'data').decode('ascii')
app.authmodel(model, ['insert', 'getall'])
resp = app.post(f'/{model}', json={'blob': data})
assert resp.status_code == 201, resp.json()
assert resp.json()['blob'] == data
resp = app.get(f'/{model}')
assert resp.status_code == 200, resp.json()
assert resp.json()['_data'][0]['blob'] == data
|
py | b40a77f28ba60948919e73eef2894e240d94114e | """
DAQmx (National Instruments, :mod:`fluidlab.instruments.daq.daqmx`)
===================================================================
.. todo:: DAQmx interface and drivers (using Comedi API?)...
Provides:
.. autofunction:: read_analog
.. autofunction:: write_analog
.. autofunction:: write_analog_end_task
.. autofunction:: measure_freq
"""
from __future__ import print_function
from collections import Iterable
from numbers import Number
from platform import platform
import time
import numpy as np
import ctypes
import six
from PyDAQmx import Task, byref, float64, int32, uInt32, bool32
from PyDAQmx import (
DAQmx_Val_Cfg_Default,
DAQmx_Val_RSE,
DAQmx_Val_NRSE,
DAQmx_Val_Diff,
DAQmx_Val_Volts,
DAQmx_AI_Coupling,
DAQmx_Val_DC,
DAQmx_Val_AC,
DAQmx_Val_GND,
DAQmx_Val_Rising,
DAQmx_Val_FiniteSamps,
DAQmx_Val_GroupByChannel,
DAQmx_Val_Hz,
DAQmx_Val_LowFreq1Ctr,
)
try:
from PyDAQmx import DAQmx_Val_PseudoDiff
except ImportError:
DAQmx_Val_PseudoDiff = None
pass
from PyDAQmx.DAQmxFunctions import AttributeNotSupportedInTaskContextError
_coupling_values = {"DC": DAQmx_Val_DC, "AC": DAQmx_Val_AC, "GND": DAQmx_Val_GND}
def _parse_resource_names(resource_names):
if isinstance(resource_names, str):
if six.PY3 and isinstance(resource_names, str):
resource_names = resource_names.encode("ascii")
resource_names = [resource_names]
elif isinstance(resource_names, Iterable):
if six.PY3 and isinstance(resource_names[0], str):
resource_names = [r.encode("ascii") for r in resource_names]
else:
raise ValueError("resource_names has to be a string or an iterable.")
nb_resources = len(resource_names)
return resource_names, nb_resources
def read_analog(
resource_names,
terminal_config,
volt_min,
volt_max,
samples_per_chan=1,
sample_rate=1,
coupling_types="DC",
output_filename=None,
verbose=False,
):
"""Read from the analog input subdevice.
Parameters
----------
resource_names: {str or iterable of str}
        Analog input identifier(s), e.g. 'Dev1/ai0'.
terminal_config: {'Diff', 'PseudoDiff', 'RSE', 'NRSE'}
A type of configuration (apply to all terminals).
volt_min : {number or iterable of numbers}
Minima for the channels.
volt_max : {number or iterable of numbers}
Maxima for the channels.
samples_per_chan: number
Number of samples per channel to read.
sample_rate: number
Sample rate for all channels (Hz).
coupling_types : {'DC', 'AC', 'GND', list of str}
Type of coupling for each resource.
output_filename: {None, str}
If specified data is output into this file instead of output
arrays.
verbose: {False, boolean}
If True, print more verbose message
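    Examples
    --------
    Illustrative only; channel names such as 'Dev1/ai0' depend on the
    hardware configuration::
        data = read_analog(
            'Dev1/ai0', terminal_config='Diff', volt_min=-10, volt_max=10,
            samples_per_chan=100, sample_rate=100, coupling_types='DC')
        # returns an array of shape (nb_resources, samples_per_chan)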
"""
if output_filename is not None:
raise NotImplementedError()
# prepare resource_names
resource_names, nb_resources = _parse_resource_names(resource_names)
# prepare terminal_config
if terminal_config is None:
if verbose:
print("DAQmx: Default terminal configuration will be used.")
terminal_config = DAQmx_Val_Cfg_Default
elif terminal_config == "RSE":
if verbose:
print("DAQmx: Referenced single-ended mode")
terminal_config = DAQmx_Val_RSE
elif terminal_config == "NRSE":
if verbose:
print("DAQmx: Non-referenced single-ended mode")
terminal_config = DAQmx_Val_NRSE
elif terminal_config == "Diff":
if verbose:
print("DAQmx: Differential mode")
terminal_config = DAQmx_Val_Diff
elif terminal_config == "PseudoDiff":
if verbose:
print("DAQmx: Pseudodifferential mode")
terminal_config = DAQmx_Val_PseudoDiff
else:
raise ValueError("DAQmx: Unrecognized terminal mode")
# prepare volt_min, volt_max
if not isinstance(volt_min, Number) and len(volt_min) != nb_resources:
raise ValueError(
"volt_min has to be a number or an iterable of the same length "
"as resource_names"
)
if not isinstance(volt_max, Number) and len(volt_max) != nb_resources:
raise ValueError(
"volt_max has to be a number or an iterable of the same length "
"as resource_names"
)
if isinstance(volt_min, Number):
volt_min = [volt_min] * nb_resources
if isinstance(volt_max, Number):
volt_max = [volt_max] * nb_resources
# check samples_per_chan
if not isinstance(samples_per_chan, int) or samples_per_chan <= 0:
raise ValueError("samples_per_chan has to be a positive integer.")
# prepare coupling_types
if (
not isinstance(coupling_types, str)
and len(coupling_types) != nb_resources
):
raise ValueError(
"coupling_types has to be a number or an iterable "
"of the same length as resource_names"
)
if isinstance(coupling_types, str):
coupling_types = [coupling_types] * nb_resources
possible_keys_coupling = _coupling_values.keys()
for coupling in coupling_types:
if coupling not in possible_keys_coupling:
raise ValueError(f"Bad value in coupling_types, got: {coupling}")
if verbose:
print("DAQmx: Create Task")
task = Task()
actual_volt_min = float64()
actual_volt_max = float64()
for ir, resource in enumerate(resource_names):
if verbose:
print(
"DAQmx: Create AI Voltage Chan ("
+ str(resource)
+ " ["
+ str(volt_min[ir])
+ "V;"
+ str(volt_max[ir])
+ "V])"
)
task.CreateAIVoltageChan(
resource,
"",
terminal_config,
volt_min[ir],
volt_max[ir],
DAQmx_Val_Volts,
None,
)
    # Note: SetChanAttribute must be done in a second loop because, for a
    # multi-board acquisition, DAQmx requires that all channels be added to
    # the task before changing any parameter
for ir, resource in enumerate(resource_names):
# check volt range
try:
task.GetAIRngHigh(resource, byref(actual_volt_max))
task.GetAIRngLow(resource, byref(actual_volt_min))
actual_volt_available = True
except AttributeError:
actual_volt_available = False # DAQmx Base
if actual_volt_available:
actual_vmin = actual_volt_min.value
actual_vmax = actual_volt_max.value
if actual_vmin != volt_min[ir] or actual_vmax != volt_max[ir]:
print(
"DAQmx: Actual range for "
+ str(resource)
+ " is actually [{:6.2f} V, {:6.2f} V].".format(
actual_vmin, actual_vmax
)
)
# set coupling
coupling_value = _coupling_values[coupling_types[ir]]
if verbose:
for name, value in _coupling_values.items():
if value == coupling_value:
print(
"DAQmx: Setting AI channel coupling ("
+ str(resource)
+ "): "
+ name
)
try:
task.SetChanAttribute(resource, DAQmx_AI_Coupling, coupling_value)
except AttributeNotSupportedInTaskContextError:
print("Coupling attribute not supported on this device")
# configure clock and DMA input buffer
if samples_per_chan > 1:
verbose_text = "DAQmx: Configure clock timing ("
if verbose:
if samples_per_chan < 1000:
verbose_text += str(samples_per_chan) + " samp/chan @ "
elif samples_per_chan < 1_000_000:
verbose_text += str(samples_per_chan / 1000) + " kSamp/chan @ "
else:
verbose_text += (
str(samples_per_chan / 1_000_000) + " MSamp/chan @ "
)
if sample_rate < 1000:
verbose_text += "%.2f Hz using OnboardClock)" % sample_rate
elif sample_rate < 1_000_000:
verbose_text += "%.2f kHz using OnboardClock)" % (
sample_rate / 1000.0
)
else:
verbose_text += "%.2f MHz using OnboardClock)" % (
sample_rate / 1e6
)
print(verbose_text)
task.CfgSampClkTiming(
"OnboardClock",
sample_rate,
DAQmx_Val_Rising,
DAQmx_Val_FiniteSamps,
samples_per_chan,
)
if verbose:
print("DAQmx: Configure DMA input buffer")
task.CfgInputBuffer(samples_per_chan)
# start task
if verbose:
if platform().startswith("Windows"):
dateformat = "%A %d %B %Y - %X (%z)"
else:
dateformat = "%A %e %B %Y - %H:%M:%S (UTC%z)"
starttime = time.time()
starttime_str = time.strftime(dateformat, time.localtime(starttime))
endtime = starttime + samples_per_chan / sample_rate
endtime_str = time.strftime(dateformat, time.localtime(endtime))
print("DAQmx: Starting acquisition: " + starttime_str)
print(
" Expected duration: %.2f min"
% (samples_per_chan / (60.0 * sample_rate))
)
print(" Expected end time: " + endtime_str)
task.StartTask()
# read data
# why 10?
timeout = float(10 * samples_per_chan / sample_rate)
buffer_size_in_samps = int(samples_per_chan * nb_resources)
data = np.zeros((buffer_size_in_samps,), dtype=np.float64)
samples_per_chan_read = int32()
task.ReadAnalogF64(
samples_per_chan,
timeout,
DAQmx_Val_GroupByChannel,
data,
buffer_size_in_samps,
byref(samples_per_chan_read),
None,
)
if verbose:
print("DAQmx: %d samples read." % samples_per_chan_read.value)
return data.reshape([nb_resources, samples_per_chan])
def write_analog(
resource_names,
sample_rate=1,
volt_min=-10.0,
volt_max=10.0,
signals=None,
blocking=True,
verbose=False,
):
"""Write analogic output
Parameters
----------
    resource_names: {str or iterable of str}
        Analog output identifier(s), e.g. 'Dev1/ao0'.
sample_rate: number
Frequency rate for all channels (Hz).
volt_min: {number or iterable of numbers}
Minima for the channels
volt_max: {number or iterable of numbers}
Maxima for the channels
signals: numpy.ndarray or simple scalar
The signal(s) to be output.
blocking: bool
Specifies whether to wait until the task is done before
returning. If blocking=false, then a task object is
returned. To stop the task, use the :func:`write_analog_end_task` function.
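    Examples
    --------
    Illustrative only; 'dev1/ao0' is a placeholder channel name::
        signals = np.cos(np.linspace(0, 2 * np.pi, 100))
        write_analog('dev1/ao0', sample_rate=10, signals=signals, blocking=True)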
"""
# prepare resource_names
resource_names, nb_resources = _parse_resource_names(resource_names)
# prepare volt_min, volt_max
if not isinstance(volt_min, Number) and len(volt_min) != nb_resources:
raise ValueError(
"volt_min has to be a number or an iterable of the same length "
"as resource_names"
)
if not isinstance(volt_max, Number) and len(volt_max) != nb_resources:
raise ValueError(
"volt_max has to be a number or an iterable of the same length "
"as resource_names"
)
if isinstance(volt_min, Number):
volt_min = [volt_min] * nb_resources
if isinstance(volt_max, Number):
volt_max = [volt_max] * nb_resources
    if not isinstance(signals, (list, tuple, np.ndarray)):
nb_samps_per_chan = 1
# if np.isscalar(signals)==True
elif signals.ndim == 1:
nb_samps_per_chan = len(signals)
elif signals.ndim == 2:
nb_samps_per_chan = signals.shape[1]
else:
raise ValueError(
"signals has to be a scalar or an array of dimension 1 or 2."
)
# create task
if verbose:
print("DAQmx: Create Task")
task = Task()
# create AO channels
for ir, resource in enumerate(resource_names):
if verbose:
print(
"DAQmx: Create A0 Voltage Chan ("
+ resource
+ " ["
+ str(volt_min[ir])
+ "V;"
+ str(volt_max[ir])
+ "V])"
)
task.CreateAOVoltageChan(
resource, "", volt_min[ir], volt_max[ir], DAQmx_Val_Volts, None
)
# configure clock
if nb_samps_per_chan > 1:
verbose_text = "DAQmx: Configure clock timing ("
if verbose:
            if nb_samps_per_chan < 1000:
                verbose_text = (
                    verbose_text + str(nb_samps_per_chan) + " samp/chan @ "
                )
            elif nb_samps_per_chan < 1_000_000:
                verbose_text = (
                    verbose_text + str(nb_samps_per_chan / 1000) + " kSamp/chan @ "
                )
            else:
                verbose_text = (
                    verbose_text
                    + str(nb_samps_per_chan / 1_000_000)
+ " MSamp/chan @ "
)
if sample_rate < 1000:
verbose_text = verbose_text + (
"%.2f Hz using OnboardClock)" % sample_rate
)
elif sample_rate < 1_000_000:
verbose_text = verbose_text + (
"%.2f kHz using OnboardClock)" % (sample_rate / 1000.0)
)
else:
verbose_text = verbose_text + (
"%.2f MHz using OnboardClock)" % (sample_rate / 1e6)
)
print(verbose_text)
task.CfgSampClkTiming(
"OnboardClock",
sample_rate,
DAQmx_Val_Rising,
DAQmx_Val_FiniteSamps,
nb_samps_per_chan,
)
# write data
written = int32()
if nb_samps_per_chan == 1:
task.WriteAnalogScalarF64(1, 10.0, signals, None)
print(" Write voltage: " + ("%.2f Volts" % signals))
# 0,10== dont autostart + timeout 10sec,
# http://zone.ni.com/reference/en-XX/help/370471AE-01/daqmxcfunc/daqmxwriteanalogscalarf64/
# task.WriteAnalogF64(
# nb_samps_per_chan, 0, 10.0, DAQmx_Val_GroupByChannel,
# signals.ravel(), byref(written), None)
else:
task.WriteAnalogF64(
nb_samps_per_chan,
0,
10.0,
DAQmx_Val_GroupByChannel,
signals.ravel(),
byref(written),
None,
)
# 0,10== dont autostart + timeout 10sec,
# http://zone.ni.com/reference/en-XX/help/370471AE-01/daqmxcfunc/daqmxwriteanalogf64/
# start task
if verbose:
if platform().startswith("Windows"):
dateformat = "%A %d %B %Y - %X (%z)"
else:
dateformat = "%A %e %B %Y - %H:%M:%S (UTC%z)"
starttime = time.time()
starttime_str = time.strftime(dateformat, time.localtime(starttime))
endtime = starttime + nb_samps_per_chan / sample_rate
endtime_str = time.strftime(dateformat, time.localtime(endtime))
print("DAQmx: Starting write Task: " + starttime_str)
print(
" Expected duration: %.2f min"
% (nb_samps_per_chan / (60.0 * sample_rate))
)
print(" Expected end time: " + endtime_str)
task.StartTask()
if blocking:
task.WaitUntilTaskDone(1.1 * nb_samps_per_chan / sample_rate)
task.StopTask()
else:
if verbose:
print("DAQmx write done")
return task
def write_analog_end_task(task, timeout=0.0):
"""This function ends a writing task that has been created with blocking=False.
Parameters
----------
task : PyDAQmx.Task
The task to end.
timeout : number
Time (in s) to wait before stopping the task if it is not done.
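    Example (illustrative; 'dev1/ao0' is a placeholder channel name)::
        task = write_analog('dev1/ao0', sample_rate=10, signals=signals,
                            blocking=False)
        write_analog_end_task(task)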
"""
task.WaitUntilTaskDone(timeout)
task.StopTask()
task.ClearTask()
def measure_freq(resource_name, freq_min=1, freq_max=1000):
    """Measure the frequency of a digital signal on a counter input.
    Parameters
    ----------
    resource_name: str
        Counter input identifier, e.g. 'Dev1/ctr0'.
freq_min : number
The minimum frequency (Hz) that you expect to measure.
freq_max : number
The maximum frequency (Hz) that you expect to measure.
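    Example (illustrative; 'Dev1/ctr0' is a placeholder counter name)::
        freq = measure_freq('Dev1/ctr0', freq_min=1, freq_max=1000)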
"""
# create task
task = Task()
# it seems that this argument is actually not used with the method
# DAQmx_Val_LowFreq1Ctr.
measure_time = 0.5
task.CreateCIFreqChan(
resource_name,
"",
freq_min,
freq_max,
DAQmx_Val_Hz,
DAQmx_Val_Rising,
DAQmx_Val_LowFreq1Ctr,
measure_time,
1,
"",
)
task.StartTask()
timeout = 10
result = float64()
null = ctypes.POINTER(ctypes.c_uint)()
task.ReadCounterScalarF64(timeout, byref(result), None)
return result.value
if __name__ == "__main__":
# data = read_analog(
# resource_names='dev1/ai0',
# terminal_config='Diff',
# volt_min=-10,
# volt_max=10,
# samples_per_chan=10,
# sample_rate=10,
# coupling_types='DC')
# data = read_analog(
# resource_names=['dev1/ai{}'.format(ic) for ic in range(4)],
# terminal_config='Diff',
# volt_min=-10,
# volt_max=10,
# samples_per_chan=10,
# sample_rate=10,
# coupling_types='DC')
# signals = np.cos(np.linspace(0, 2*np.pi, 100))
# write_analog('dev1/ao0', 10, signals, blocking=True)
signals = np.cos(np.linspace(0, 2 * np.pi, 100))
signals = np.vstack((signals, signals + 2))
    write_analog([f"dev1/ao{i}" for i in (0, 2)], 10, signals=signals, blocking=True)
|
py | b40a78a22ecf4e188cc5eaee934265ff393254ea | import numpy as np
class EvaluateTool(object):
def __init__(self, args):
self.args = args
def evaluate(self, preds, golds, section):
summary = {}
all_match = []
for pred, gold_item in zip(preds, golds):
match_or_not = pred == gold_item['seq_out']
all_match.append(match_or_not)
summary["all"] = float(np.mean(all_match))
return summary
|
py | b40a78d427015327d584db86a202d7aab9726adc | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Embedding
import matplotlib.pyplot as plt
import tokenizer
import embeddings
#callbacks to be used during training at the end of each epoch
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
patience=3, min_delta=0.0001)
#LSTM_model
tf.keras.backend.clear_session()
model = keras.models.Sequential([
Embedding(embeddings.num_tokens,
embeddings.embedding_dim,
embeddings_initializer=keras.initializers.Constant(embeddings.embedding_matrix),
mask_zero=True, input_shape=[None], trainable=False),
keras.layers.Bidirectional(keras.layers.LSTM(256, dropout=0.4)),
keras.layers.Dense(12, activation="softmax")
])
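# Architecture notes: the embedding layer is initialised from the pre-computed
# matrix in `embeddings` and kept frozen (trainable=False); mask_zero=True lets
# the LSTM skip padded timesteps; a single bidirectional LSTM (256 units per
# direction, dropout 0.4) feeds a 12-way softmax classifier.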
print("MODEL SUMMARY :- ",model.summary())
optimizer = keras.optimizers.Adam(learning_rate=0.001)
model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
history = model.fit(tokenizer.train_set, tokenizer.train_label,
batch_size = 32,
steps_per_epoch=len(tokenizer.X_train) // 32,
validation_data = (tokenizer.val_set, tokenizer.val_label),
validation_steps = len(tokenizer.val_set)//32, epochs=20,
callbacks=early_stop)
#Plot loss curve
fig = plt.figure(figsize=(10,10))
# Plot accuracy
plt.subplot(221)
plt.plot(history.history['accuracy'],'bo-', label = "acc")
plt.plot(history.history['val_accuracy'], 'ro-', label = "val_acc")
plt.title("train_accuracy vs val_accuracy")
plt.ylabel("accuracy")
plt.xlabel("epochs")
plt.grid(True)
plt.legend()
# Plot loss function
plt.subplot(222)
plt.plot(history.history['loss'],'bo-', label = "loss")
plt.plot(history.history['val_loss'], 'ro-', label = "val_loss")
plt.title("train_loss vs val_loss")
plt.ylabel("loss")
plt.xlabel("epochs")
plt.grid(True)
plt.legend()
|
py | b40a7969d938aa1ac0a98f04ef1209e392e1384f | def verify_device(db_obj, query_obj, device) -> bool:
"""
    Checks to see if a device has the same id as that recorded within the
    db.
    :param db_obj:
    :param query_obj:
    :param device:
    :return: boolean
"""
result = db_obj.read_one(query_obj.verify_device(device))
if result:
return True
else:
return False
def get_device_id(location, db, query):
"""
    Looks up the device id recorded within the db for the given location.
    :param location:
    :param db:
    :param query:
    :return: device_id, or None if not found
"""
result = db.read_one(query.get_device_id(location))
if result:
return result['device_id']
else:
return None
|
py | b40a7b7cac05a970f7705e16cf882d06a21660dd | #!/usr/bin/python
'''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
from urlparse import urlparse
import json
import base64
import time
import common
import registrar_client
import os
import crypto
import ssl
import socket
import ca_util
import sqlite3
import revocation_notifier
import keylime_sqlite
import ConfigParser
import tpm_obj
from tpm_abstract import TPM_Utilities, Hash_Algorithms, Encrypt_Algorithms, Sign_Algorithms
# setup logging
logger = common.init_logging('cloudverifier_common')
# setup config
config = ConfigParser.SafeConfigParser()
config.read(common.CONFIG_FILE)
class CloudAgent_Operational_State:
REGISTERED = 0
START = 1
SAVED = 2
GET_QUOTE = 3
GET_QUOTE_RETRY = 4
PROVIDE_V = 5
PROVIDE_V_RETRY = 6
FAILED = 7
TERMINATED = 8
INVALID_QUOTE = 9
TENANT_FAILED = 10
STR_MAPPINGS = {
0 : "Registered",
1 : "Start",
2 : "Saved",
3 : "Get Quote",
4 : "Get Quote (retry)",
5 : "Provide V",
6 : "Provide V (retry)",
7 : "Failed",
8 : "Terminated",
9 : "Invalid Quote",
10 : "Tenant Quote Failed"
}
class Timer(object):
def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
if self.verbose:
print 'elapsed time: %f ms' % self.msecs
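# Example usage (illustrative):
#   with Timer(verbose=True):
#       do_work()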
def init_mtls(section='cloud_verifier',generatedir='cv_ca'):
if not config.getboolean('general',"enable_tls"):
logger.warning("TLS is currently disabled, keys will be sent in the clear! Should only be used for testing.")
return None
logger.info("Setting up TLS...")
my_cert = config.get(section, 'my_cert')
ca_cert = config.get(section, 'ca_cert')
my_priv_key = config.get(section, 'private_key')
my_key_pw = config.get(section,'private_key_pw')
tls_dir = config.get(section,'tls_dir')
if tls_dir =='generate':
if my_cert!='default' or my_priv_key !='default' or ca_cert !='default':
raise Exception("To use tls_dir=generate, options ca_cert, my_cert, and private_key must all be set to 'default'")
if generatedir[0]!='/':
generatedir =os.path.abspath('%s/%s'%(common.WORK_DIR,generatedir))
tls_dir = generatedir
ca_path = "%s/cacert.crt"%(tls_dir)
if os.path.exists(ca_path):
logger.info("Existing CA certificate found in %s, not generating a new one"%(tls_dir))
else:
logger.info("Generating a new CA in %s and a client certificate for connecting"%tls_dir)
logger.info("use keylime_ca -d %s to manage this CA"%tls_dir)
if not os.path.exists(tls_dir):
os.makedirs(tls_dir,0o700)
if my_key_pw=='default':
logger.warning("CAUTION: using default password for CA, please set private_key_pw to a strong password")
ca_util.setpassword(my_key_pw)
ca_util.cmd_init(tls_dir)
ca_util.cmd_mkcert(tls_dir, socket.gethostname())
ca_util.cmd_mkcert(tls_dir, 'client')
if tls_dir == 'CV':
if section !='registrar':
raise Exception("You only use the CV option to tls_dir for the registrar not %s"%section)
tls_dir = os.path.abspath('%s/%s'%(common.WORK_DIR,'cv_ca'))
if not os.path.exists("%s/cacert.crt"%(tls_dir)):
raise Exception("It appears that the verifier has not yet created a CA and certificates, please run the verifier first")
# if it is relative path, convert to absolute in WORK_DIR
if tls_dir[0]!='/':
tls_dir = os.path.abspath('%s/%s'%(common.WORK_DIR,tls_dir))
if ca_cert == 'default':
ca_path = "%s/cacert.crt"%(tls_dir)
else:
ca_path = "%s/%s"%(tls_dir,ca_cert)
if my_cert=='default':
my_cert = "%s/%s-cert.crt"%(tls_dir,socket.gethostname())
else:
my_cert = "%s/%s"%(tls_dir,my_cert)
if my_priv_key=='default':
my_priv_key = "%s/%s-private.pem"%(tls_dir,socket.gethostname())
else:
my_priv_key = "%s/%s"%(tls_dir,my_priv_key)
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_verify_locations(cafile=ca_path)
context.load_cert_chain(certfile=my_cert,keyfile=my_priv_key,password=my_key_pw)
context.verify_mode = ssl.CERT_REQUIRED
return context
def process_quote_response(agent, json_response):
"""Validates the response from the Cloud agent.
This method invokes an Registrar Server call to register, and then check the quote.
"""
received_public_key = None
quote = None
# in case of failure in response content do not continue
try:
received_public_key = json_response.get("pubkey",None)
quote = json_response["quote"]
ima_measurement_list = json_response.get("ima_measurement_list",None)
logger.debug("received quote: %s"%quote)
logger.debug("for nonce: %s"%agent['nonce'])
logger.debug("received public key: %s"%received_public_key)
logger.debug("received ima_measurement_list %s"%(ima_measurement_list!=None))
except Exception:
return None
# if no public key provided, then ensure we have cached it
if received_public_key is None:
if agent.get('public_key',"") == "" or agent.get('b64_encrypted_V',"")=="":
logger.error("agent did not provide public key and no key or encrypted_v was cached at CV")
return False
agent['provide_V'] = False
received_public_key = agent['public_key']
    if agent.get('registrar_keys',"") == "":
registrar_client.init_client_tls(config,'cloud_verifier')
registrar_keys = registrar_client.getKeys(config.get("general","registrar_ip"),config.get("general","registrar_tls_port"),agent['agent_id'])
if registrar_keys is None:
logger.warning("AIK not found in registrar, quote not validated")
return False
agent['registrar_keys'] = registrar_keys
tpm_version = json_response.get('tpm_version')
tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=tpm_version)
hash_alg = json_response.get('hash_alg')
enc_alg = json_response.get('enc_alg')
sign_alg = json_response.get('sign_alg')
# Update chosen tpm and algorithms
agent['tpm_version'] = tpm_version
agent['hash_alg'] = hash_alg
agent['enc_alg'] = enc_alg
agent['sign_alg'] = sign_alg
# Ensure hash_alg is in accept_tpm_hash_alg list
if not Hash_Algorithms.is_accepted(hash_alg, agent['accept_tpm_hash_algs']):
raise Exception("TPM Quote is using an unaccepted hash algorithm: %s"%hash_alg)
# Ensure enc_alg is in accept_tpm_encryption_algs list
if not Encrypt_Algorithms.is_accepted(enc_alg, agent['accept_tpm_encryption_algs']):
raise Exception("TPM Quote is using an unaccepted encryption algorithm: %s"%enc_alg)
# Ensure sign_alg is in accept_tpm_encryption_algs list
if not Sign_Algorithms.is_accepted(sign_alg, agent['accept_tpm_signing_algs']):
raise Exception("TPM Quote is using an unaccepted signing algorithm: %s"%sign_alg)
if tpm.is_deep_quote(quote):
validQuote = tpm.check_deep_quote(agent['nonce'],
received_public_key,
quote,
agent['registrar_keys']['aik'],
agent['registrar_keys']['provider_keys']['aik'],
agent['vtpm_policy'],
agent['tpm_policy'],
ima_measurement_list,
agent['ima_whitelist'])
else:
validQuote = tpm.check_quote(agent['nonce'],
received_public_key,
quote,
agent['registrar_keys']['aik'],
agent['tpm_policy'],
ima_measurement_list,
agent['ima_whitelist'],
hash_alg)
if not validQuote:
return False
# set a flag so that we know that the agent was verified once.
# we only issue notifications for agents that were at some point good
agent['first_verified']=True
# has public key changed? if so, clear out b64_encrypted_V, it is no longer valid
if received_public_key != agent.get('public_key',""):
agent['public_key'] = received_public_key
agent['b64_encrypted_V'] = ""
agent['provide_V'] = True
# ok we're done
return validQuote
def prepare_v(agent):
# be very careful printing K, U, or V as they leak in logs stored on unprotected disks
if common.INSECURE_DEBUG:
logger.debug("b64_V (non encrypted): " + agent['v'])
if agent.get('b64_encrypted_V',"") !="":
b64_encrypted_V = agent['b64_encrypted_V']
logger.debug("Re-using cached encrypted V")
else:
# encrypt V with the public key
b64_encrypted_V = base64.b64encode(crypto.rsa_encrypt(crypto.rsa_import_pubkey(agent['public_key']),str(base64.b64decode(agent['v']))))
agent['b64_encrypted_V'] = b64_encrypted_V
logger.debug("b64_encrypted_V:" + b64_encrypted_V)
post_data = {
'encrypted_key': b64_encrypted_V
}
v_json_message = json.dumps(post_data)
return v_json_message
def prepare_get_quote(agent):
"""This method encapsulates the action required to invoke a quote request on the Cloud Agent.
This method is part of the polling loop of the thread launched on Tenant POST.
"""
agent['nonce'] = TPM_Utilities.random_password(20)
params = {
'nonce': agent['nonce'],
'mask': agent['tpm_policy']['mask'],
'vmask': agent['vtpm_policy']['mask'],
}
return params
def process_get_status(agent):
if isinstance(agent['ima_whitelist'],dict) and 'whitelist' in agent['ima_whitelist']:
wl_len = len(agent['ima_whitelist']['whitelist'])
else:
wl_len = 0
response = {'operational_state':agent['operational_state'],
'v':agent['v'],
'ip':agent['ip'],
'port':agent['port'],
'tpm_policy':agent['tpm_policy'],
'vtpm_policy':agent['vtpm_policy'],
'metadata':agent['metadata'],
'ima_whitelist_len':wl_len,
'tpm_version':agent['tpm_version'],
'accept_tpm_hash_algs':agent['accept_tpm_hash_algs'],
'accept_tpm_encryption_algs':agent['accept_tpm_encryption_algs'],
'accept_tpm_signing_algs':agent['accept_tpm_signing_algs'],
'hash_alg':agent['hash_alg'],
'enc_alg':agent['enc_alg'],
'sign_alg':agent['sign_alg'],
}
return response
def get_query_tag_value(path, query_tag):
"""This is a utility method to query for specific the http parameters in the uri.
Returns the value of the parameter, or None if not found."""
data = { }
parsed_path = urlparse(path)
query_tokens = parsed_path.query.split('&')
# find the 'ids' query, there can only be one
for tok in query_tokens:
query_tok = tok.split('=')
query_key = query_tok[0]
if query_key is not None and query_key == query_tag:
# ids tag contains a comma delimited list of ids
data[query_tag] = query_tok[1]
break
return data.get(query_tag,None)
# sign a message with the revocation key, notifying of a verification problem
def notifyError(agent,msgtype='revocation'):
if not config.getboolean('cloud_verifier', 'revocation_notifier'):
return
# prepare the revocation message:
revocation = {
'type':msgtype,
'ip':agent['ip'],
'port':agent['port'],
'tpm_policy':agent['tpm_policy'],
'vtpm_policy':agent['vtpm_policy'],
'metadata':agent['metadata'],
}
revocation['event_time'] = time.asctime()
tosend={'msg': json.dumps(revocation)}
#also need to load up private key for signing revocations
if agent['revocation_key']!="":
global signing_key
signing_key = crypto.rsa_import_privkey(agent['revocation_key'])
tosend['signature']=crypto.rsa_sign(signing_key,tosend['msg'])
#print "verified? %s"%crypto.rsa_verify(signing_key, tosend['signature'], tosend['revocation'])
else:
        tosend['signature']="none"
revocation_notifier.notify(tosend)
# ===== sqlite stuff =====
def init_db(db_filename):
# in the form key, SQL type
cols_db = {
'agent_id': 'TEXT PRIMARY_KEY',
'v': 'TEXT',
'ip': 'TEXT',
'port': 'INT',
'operational_state': 'INT',
'public_key': 'TEXT',
'tpm_policy' : 'TEXT',
'vtpm_policy' : 'TEXT',
'metadata' : 'TEXT',
'ima_whitelist' : 'TEXT',
'revocation_key': 'TEXT',
'tpm_version': 'INT',
'accept_tpm_hash_algs': 'TEXT',
'accept_tpm_encryption_algs': 'TEXT',
'accept_tpm_signing_algs': 'TEXT',
'hash_alg': 'TEXT',
'enc_alg': 'TEXT',
'sign_alg': 'TEXT',
}
# these are the columns that contain json data and need marshalling
json_cols_db = ['tpm_policy','vtpm_policy','metadata','ima_whitelist','accept_tpm_hash_algs', 'accept_tpm_encryption_algs', 'accept_tpm_signing_algs']
# in the form key : default value
exclude_db = {
'registrar_keys': '',
'nonce': '',
'b64_encrypted_V': '',
'provide_V': True,
'num_retries': 0,
'pending_event': None,
'first_verified':False,
}
return keylime_sqlite.KeylimeDB(db_filename,cols_db,json_cols_db,exclude_db)
|
py | b40a7bbb76cac40929d1c9ef320dab96963586ef | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from typing import Set
logger = logging.getLogger(__name__)
# REQUIREMENTS is a simple list of requirement specifiers[1], and must be
# installed. It is passed to setup() as install_requires in setup.py.
#
# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict
# of lists. The dict key is the optional dependency name and can be passed to
# pip when installing. The list is a series of requirement specifiers[1] to be
# installed when that optional dependency requirement is specified. It is passed
# to setup() as extras_require in setup.py
#
# Note that these both represent runtime dependencies (and the versions
# installed are checked at runtime).
#
# Also note that we replicate these constraints in the Synapse Dockerfile while
# pre-installing dependencies. If these constraints are updated here, the same
# change should be made in the Dockerfile.
#
# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
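#
# As an illustration, an optional dependency group can be installed with pip's
# extras syntax, e.g. `pip install matrix-synapse[postgres]`.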
REQUIREMENTS = [
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
"jsonschema>=3.0.0",
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
"frozendict>=1,!=2.1.2",
"unpaddedbase64>=1.1.0",
"canonicaljson>=1.4.0",
# we use the type definitions added in signedjson 1.1.
"signedjson>=1.1.0",
"pynacl>=1.2.1",
"idna>=2.5",
# validating SSL certs for IP addresses requires service_identity 18.1.
"service_identity>=18.1.0",
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
"Twisted>=18.9.0",
"treq>=15.1",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0",
"pyyaml>=3.11",
"pyasn1>=0.1.9",
"pyasn1-modules>=0.0.7",
"bcrypt>=3.1.0",
"pillow>=5.4.0",
"sortedcontainers>=1.4.4",
"pymacaroons>=0.13.0",
"msgpack>=0.5.2",
"phonenumbers>=8.2.0",
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
"prometheus_client>=0.4.0",
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see #9936
"attrs>=19.2.0,!=21.1.0",
"netaddr>=0.7.18",
"Jinja2>=2.9",
"bleach>=1.4.3",
"typing-extensions>=3.7.4",
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
"cryptography>=3.4.7",
# ijson 3.1.4 fixes a bug with "." in property names
"ijson>=3.1.4",
"matrix-common~=1.1.0",
# We need packaging.requirements.Requirement, added in 16.1.
"packaging>=16.1",
]
CONDITIONAL_REQUIREMENTS = {
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
"postgres": [
# we use execute_values with the fetch param, which arrived in psycopg 2.8.
"psycopg2>=2.8 ; platform_python_implementation != 'PyPy'",
"psycopg2cffi>=2.8 ; platform_python_implementation == 'PyPy'",
"psycopg2cffi-compat==1.1 ; platform_python_implementation == 'PyPy'",
],
"saml2": [
"pysaml2>=4.5.0",
],
"oidc": ["authlib>=0.14.0"],
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
"systemd": ["systemd-python>=231"],
"url_preview": ["lxml>=4.2.0"],
"sentry": ["sentry-sdk>=0.7.2"],
"opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"],
"jwt": ["pyjwt>=1.6.4"],
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
"redis": ["txredisapi>=1.4.7", "hiredis"],
# Required to use experimental `caches.track_memory_usage` config option.
"cache_memory": ["pympler"],
}
ALL_OPTIONAL_REQUIREMENTS: Set[str] = set()
for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
# Exclude systemd as it's a system-based requirement.
# Exclude lint as it's a dev-based requirement.
if name not in ["systemd"]:
ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
# ensure there are no double-quote characters in any of the deps (otherwise the
# 'pip install' incantation in DependencyException will break)
for dep in itertools.chain(
REQUIREMENTS,
*CONDITIONAL_REQUIREMENTS.values(),
):
if '"' in dep:
raise Exception(
"Dependency `%s` contains double-quote; use single-quotes instead" % (dep,)
)
def list_requirements():
return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)
if __name__ == "__main__":
import sys
sys.stdout.writelines(req + "\n" for req in list_requirements())
|
py | b40a7c42ce0fa63aa276af221d012d2db9591dbc | import pandas as pd
import numpy as np
import scipy.io
import random, math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def Plot2D(T, title, x, y, num_to_plot=40):
# This method picks a bunch of random samples (images in your case)
# to plot onto the chart:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.set_xlabel('Component: {0}'.format(x))
ax.set_ylabel('Component: {0}'.format(y))
x_size = (max(T[:,x]) - min(T[:,x])) * 0.08
y_size = (max(T[:,y]) - min(T[:,y])) * 0.08
for i in range(num_to_plot):
img_num = int(random.random() * num_images)
x0, y0 = T[img_num,x]-x_size/2., T[img_num,y]-y_size/2.
x1, y1 = T[img_num,x]+x_size/2., T[img_num,y]+y_size/2.
        img = df.iloc[img_num,:].values.reshape(num_pixels, num_pixels)
ax.imshow(img, aspect='auto', cmap=plt.cm.gray, interpolation='nearest', zorder=100000, extent=(x0, x1, y0, y1))
# It also plots the full scatter:
ax.scatter(T[:,x],T[:,y], marker='.',alpha=0.7)
# A .MAT file is a MATLAB file. The faces dataset could have come
# in through .png images, but we'll show you how to do that in
# another lab. For now, you'll see how to import .mats:
mat = scipy.io.loadmat('Datasets/face_data.mat')
df = pd.DataFrame(mat['images']).T
num_images, num_pixels = df.shape
num_pixels = int(math.sqrt(num_pixels))
# Rotate the pictures, so we don't have to crane our necks:
for i in range(num_images):
    df.loc[i,:] = df.loc[i,:].values.reshape(num_pixels, num_pixels).T.reshape(-1)
#
# TODO: Implement PCA here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D.
#
# The format is: Plot2D(T, title, x, y, num_to_plot=40):
# T is your transformed data, NDArray.
# title is your chart title
# x is the principal component you want displayed on the x-axis, Can be 0 or 1
# y is the principal component you want displayed on the y-axis, Can be 1 or 2
#
# .. your code here ..
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
pca.fit(df)
T = pca.transform(df)
Plot2D(T, "PCA", 1, 2, num_to_plot=40)
#
# TODO: Implement Isomap here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D using
# the first two components.
#
# .. your code here ..
from sklearn import manifold
iso = manifold.Isomap(n_neighbors=4, n_components=3)
iso.fit(df)
T_iso = iso.transform(df)
Plot2D(T_iso, "ISOMAP", 1, 2, num_to_plot=40)
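# Another optional check (a small sketch, assuming scikit-learn's Isomap API):
# the reconstruction error gives a rough sense of embedding quality.
print("Isomap reconstruction error:", iso.reconstruction_error())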
#
# TODO: If you're up for a challenge, draw your dataframes in 3D
# Even if you're not, just do it anyway.
#
# .. your code here ..
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.scatter(df.iloc[:,0], df.iloc[:,1], df.iloc[:,2], c='red')
plt.show()
|
py | b40a7de0ce88124007e6c34044528c13e5ecfd97 | import os
import os.path as osp
import glob
import shutil
DS_FILES = {
'CR2': '{ds_dir}/auxiliary/source/CR2/{img_id}.CR2',
'json_markup': '{ds_dir}/auxiliary/source/JPG.JSON/{img_id}.jpg.json',
'json_exif': '{ds_dir}/auxiliary/extra/exif/{img_id}.json',
'json_gt': '{ds_dir}/auxiliary/extra/gt_json/{img_id}.json',
'png': '{ds_dir}/PNG/{img_id}.png',
'jpg': '{ds_dir}/JPG/{img_id}.jpg',
}
DS_COMMON_FILES = {
'gt': '{ds_dir}/gt.csv',
'properties': '{ds_dir}/properties.csv',
'cam_estimation': '{ds_dir}/auxiliary/extra/cam_estimation.csv',
'exif_stat': '{ds_dir}/auxiliary/extra/exif_stat.csv',
}
def get_path(ds_dir, img_id, data_type):
if data_type not in DS_FILES:
raise KeyError('{} (Available keys: {}'.format(data_type, str(DS_FILES.keys())))
return DS_FILES[data_type].format(
ds_dir=ds_dir,
img_id=img_id,
)
def get_common_path(ds_dir, data_type):
if data_type not in DS_COMMON_FILES:
raise KeyError('{} (Available keys: {}'.format(data_type, str(DS_COMMON_FILES.keys())))
return DS_COMMON_FILES[data_type].format(
ds_dir=ds_dir
)
def prepare_generated_dirs(ds_dir, overwrite=False):
for path in [
'{ds_dir}/auxiliary/extra/exif',
'{ds_dir}/auxiliary/extra/gt_json',
'{ds_dir}/PNG',
'{ds_dir}/JPG',
]:
path = path.format(ds_dir=ds_dir)
if overwrite and osp.exists(path):
shutil.rmtree(path)
assert not osp.exists(path), 'Error! DS dirs (for example, {}) already exist. \nUse --overwrite to explicitly force its removing'.format(path)
os.makedirs(path)
for path in [
'{ds_dir}/gt.csv',
'{ds_dir}/properties.csv',
'{ds_dir}/auxiliary/extra/cam_estimation.csv',
'{ds_dir}/auxiliary/extra/exif_stat.csv',
]:
path = path.format(ds_dir=ds_dir)
if overwrite and osp.exists(path):
os.remove(path)
assert not osp.exists(path), 'Error! File {} already exists. \nUse --overwrite to explicitly force its removing'.format(path)
def get_img_ids(ds_dir):
json_jpg_files = glob.glob(DS_FILES['json_markup'].format(
ds_dir=ds_dir,
img_id='*')
)
def _img_id(path):
return osp.basename(path).split('.')[0]
img_ids = [_img_id(f) for f in json_jpg_files]
return img_ids
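if __name__ == '__main__':
    # A minimal usage sketch; the dataset directory and image id below are
    # made-up placeholders, not values shipped with this module.
    example_dir = '/data/my_dataset'
    example_img = 'IMG_0001'
    print(get_path(example_dir, example_img, 'jpg'))
    print(get_common_path(example_dir, 'gt'))
    print(get_img_ids(example_dir))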
|
py | b40a7f46bcf5f4ca747f80b2d4be2c28e94675b4 | import operator_benchmark as op_bench
from pt import ( # noqa
unary_test, # noqa
)
import benchmark_all_other_test # noqa
import benchmark_all_quantized_test # noqa
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
py | b40a7ff691718a4a48ea5c8798bf8baa710e683e | # -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021 davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Provides the valid routes that can be used on the API and the CDN."""
from __future__ import annotations
__all__: typing.List[str] = ["CompiledRoute", "Route", "CDNRoute"]
import math
import re
import typing
import urllib.parse
import attr
from hikari import files
from hikari.internal import attr_extensions
from hikari.internal import data_binding
HASH_SEPARATOR: typing.Final[str] = ";"
PARAM_REGEX: typing.Final[typing.Pattern[str]] = re.compile(r"{(\w+)}")
MAJOR_PARAM_COMBOS: typing.Mapping[typing.FrozenSet[str], typing.Callable[[typing.Mapping[str, str]], str]] = {
frozenset(("channel",)): lambda d: d["channel"],
frozenset(("guild",)): lambda d: d["guild"],
frozenset(("webhook", "token")): lambda d: d["webhook"] + ":" + d["token"],
}
# This could be frozen, except attrs' docs advise against this for performance
# reasons when using slotted classes.
@attr_extensions.with_copy
@attr.define(hash=True, weakref_slot=False)
@typing.final
class CompiledRoute:
"""A compiled representation of a route to a specific resource.
This is a similar representation to what `Route` provides, except
`Route` is treated as a template, this is treated as an instance.
"""
major_param_hash: str = attr.field()
"""The major parameters in a bucket hash-compatible representation."""
route: Route = attr.field()
"""The route this compiled route was created from."""
compiled_path: str = attr.field()
"""The compiled route path to use."""
@property
def method(self) -> str:
"""Return the HTTP method of this compiled route."""
return self.route.method
def create_url(self, base_url: str) -> str:
"""Create the full URL with which you can make a request.
Parameters
----------
base_url : builtins.str
The base of the URL to prepend to the compiled path.
Returns
-------
builtins.str
The full URL for the route.
"""
return base_url + self.compiled_path
def create_real_bucket_hash(self, initial_bucket_hash: str) -> str:
"""Create a full bucket hash from a given initial hash.
The result of this hash will be decided by the value of the major
parameters passed to the route during the compilation phase.
Parameters
----------
initial_bucket_hash : builtins.str
The initial bucket hash provided by Discord in the HTTP headers
for a given response.
Returns
-------
builtins.str
The input hash amalgamated with a hash code produced by the
major parameters in this compiled route instance.
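
        Examples
        --------
        A brief illustrative sketch (not from the original documentation; the
        bucket hash value is made up, and `GET_CHANNEL` is one of the route
        constants defined later in this module):

            compiled = GET_CHANNEL.compile(channel=123456789)
            compiled.create_real_bucket_hash("abcd1234")  # -> 'abcd1234;123456789'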
"""
return initial_bucket_hash + HASH_SEPARATOR + self.major_param_hash
def __str__(self) -> str:
return f"{self.method} {self.compiled_path}"
@attr_extensions.with_copy
@attr.define(hash=True, init=False, weakref_slot=False)
@typing.final
class Route:
"""A template used to create compiled routes for specific parameters.
These compiled routes are used to identify rate limit buckets. Compiled
routes may have a single major parameter.
Parameters
----------
method : builtins.str
The HTTP method
path_template : builtins.str
The template string for the path to use.
"""
method: str = attr.field()
"""The HTTP method."""
path_template: str = attr.field()
"""The template string used for the path."""
major_params: typing.Optional[typing.FrozenSet[str]] = attr.field(hash=False, eq=False)
"""The optional major parameter name combination for this endpoint."""
def __init__(self, method: str, path_template: str) -> None:
self.method = method
self.path_template = path_template
self.major_params = None
match = PARAM_REGEX.findall(path_template)
for major_param_combo in MAJOR_PARAM_COMBOS.keys():
if major_param_combo.issubset(match):
self.major_params = major_param_combo
break
def compile(self, **kwargs: typing.Any) -> CompiledRoute:
"""Generate a formatted `CompiledRoute` for this route.
This takes into account any URL parameters that have been passed.
Parameters
----------
**kwargs : typing.Any
Any parameters to interpolate into the route path.
Returns
-------
CompiledRoute
The compiled route.
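
        Examples
        --------
        A brief illustrative sketch (not from the original documentation);
        `GET_CHANNEL` is one of the route constants defined later in this
        module, and the base URL below is only a placeholder:

            compiled = GET_CHANNEL.compile(channel=123456789)
            compiled.create_url("https://discord.com/api/v8")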
"""
data = data_binding.StringMapBuilder()
for k, v in kwargs.items():
data.put(k, v)
return CompiledRoute(
route=self,
compiled_path=self.path_template.format_map(data),
major_param_hash=MAJOR_PARAM_COMBOS[self.major_params](data) if self.major_params else "-",
)
def __str__(self) -> str:
return self.path_template
def _cdn_valid_formats_converter(values: typing.AbstractSet[str]) -> typing.FrozenSet[str]:
return frozenset(v.lower() for v in values)
@attr_extensions.with_copy
@attr.define(hash=True, weakref_slot=False)
@typing.final
class CDNRoute:
"""Route implementation for a CDN resource."""
path_template: str = attr.field()
"""Template string for this endpoint."""
valid_formats: typing.AbstractSet[str] = attr.field(
converter=_cdn_valid_formats_converter,
eq=False,
hash=False,
repr=False,
)
"""Valid file formats for this endpoint."""
@valid_formats.validator
def _(self, _: attr.Attribute[typing.AbstractSet[str]], values: typing.AbstractSet[str]) -> None:
if not values:
raise ValueError(f"{self.path_template} must have at least one valid format set")
sizable: bool = attr.field(default=True, kw_only=True, repr=False, hash=False, eq=False)
"""`builtins.True` if a `size` param can be specified, or `builtins.False` otherwise."""
def compile(
self,
base_url: str,
*,
file_format: str,
size: typing.Optional[int] = None,
**kwargs: typing.Any,
) -> str:
"""Generate a full CDN url from this endpoint.
Parameters
----------
base_url : builtins.str
The base URL for the CDN. The generated route is concatenated onto
this.
file_format : builtins.str
The file format to use for the asset.
size : typing.Optional[builtins.int]
The custom size query parameter to set. If `builtins.None`,
it is not passed.
**kwargs : typing.Any
Parameters to interpolate into the path template.
Returns
-------
builtins.str
The full asset URL.
Raises
------
builtins.TypeError
If a GIF is requested, but the asset is not animated;
if an invalid file format for the endpoint is passed; or if a `size`
is passed but the route is not `sizable`.
builtins.ValueError
If `size` is specified, but is not an integer power of `2` between
`16` and `4096` inclusive or is negative.
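
        Examples
        --------
        A brief illustrative sketch (not from the original documentation; the
        user id and hash below are placeholders, and `CDN_USER_AVATAR` is one
        of the CDN route constants defined later in this module):

            CDN_USER_AVATAR.compile(
                "https://cdn.discordapp.com",
                file_format="png",
                size=256,
                user_id=123456789,
                hash="a0b1c2d3",
            )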
"""
file_format = file_format.lower()
if file_format not in self.valid_formats:
raise TypeError(
f"{file_format} is not a valid format for this asset. Valid formats are: "
+ ", ".join(self.valid_formats)
)
if "hash" in kwargs and not kwargs["hash"].startswith("a_") and file_format == GIF:
raise TypeError("This asset is not animated, so cannot be retrieved as a GIF")
# Make URL-safe first.
kwargs = {k: urllib.parse.quote(str(v)) for k, v in kwargs.items()}
url = base_url + self.path_template.format(**kwargs) + f".{file_format}"
if size is not None:
if not self.sizable:
raise TypeError("This asset cannot be resized.")
if size < 0:
raise ValueError("size must be positive")
size_power = math.log2(size)
if size_power.is_integer() and 2 <= size_power <= 16:
url += "?"
url += urllib.parse.urlencode({"size": str(size)})
else:
raise ValueError("size must be an integer power of 2 between 16 and 4096 inclusive")
return url
def compile_to_file(
self,
base_url: str,
*,
file_format: str,
size: typing.Optional[int] = None,
**kwargs: typing.Any,
) -> files.URL:
"""Perform the same as `compile`, but return the URL as a `files.URL`."""
return files.URL(self.compile(base_url, file_format=file_format, size=size, **kwargs))
GET: typing.Final[str] = "GET"
POST: typing.Final[str] = "POST"
PATCH: typing.Final[str] = "PATCH"
DELETE: typing.Final[str] = "DELETE"
PUT: typing.Final[str] = "PUT"
# Channels
GET_CHANNEL: typing.Final[Route] = Route(GET, "/channels/{channel}")
PATCH_CHANNEL: typing.Final[Route] = Route(PATCH, "/channels/{channel}")
DELETE_CHANNEL: typing.Final[Route] = Route(DELETE, "/channels/{channel}")
POST_CHANNEL_FOLLOWERS: typing.Final[Route] = Route(POST, "/channels/{channel}/followers")
GET_CHANNEL_INVITES: typing.Final[Route] = Route(GET, "/channels/{channel}/invites")
POST_CHANNEL_INVITES: typing.Final[Route] = Route(POST, "/channels/{channel}/invites")
GET_CHANNEL_MESSAGE: typing.Final[Route] = Route(GET, "/channels/{channel}/messages/{message}")
PATCH_CHANNEL_MESSAGE: typing.Final[Route] = Route(PATCH, "/channels/{channel}/messages/{message}")
DELETE_CHANNEL_MESSAGE: typing.Final[Route] = Route(DELETE, "/channels/{channel}/messages/{message}")
POST_CHANNEL_CROSSPOST: typing.Final[Route] = Route(POST, "/channels/{channel}/messages/{message}/crosspost")
GET_CHANNEL_MESSAGES: typing.Final[Route] = Route(GET, "/channels/{channel}/messages")
POST_CHANNEL_MESSAGES: typing.Final[Route] = Route(POST, "/channels/{channel}/messages")
POST_DELETE_CHANNEL_MESSAGES_BULK: typing.Final[Route] = Route(POST, "/channels/{channel}/messages/bulk-delete")
PUT_CHANNEL_PERMISSIONS: typing.Final[Route] = Route(PUT, "/channels/{channel}/permissions/{overwrite}")
DELETE_CHANNEL_PERMISSIONS: typing.Final[Route] = Route(DELETE, "/channels/{channel}/permissions/{overwrite}")
GET_CHANNEL_PINS: typing.Final[Route] = Route(GET, "/channels/{channel}/pins")
PUT_CHANNEL_PINS: typing.Final[Route] = Route(PUT, "/channels/{channel}/pins/{message}")
DELETE_CHANNEL_PIN: typing.Final[Route] = Route(DELETE, "/channels/{channel}/pins/{message}")
POST_CHANNEL_TYPING: typing.Final[Route] = Route(POST, "/channels/{channel}/typing")
POST_CHANNEL_WEBHOOKS: typing.Final[Route] = Route(POST, "/channels/{channel}/webhooks")
GET_CHANNEL_WEBHOOKS: typing.Final[Route] = Route(GET, "/channels/{channel}/webhooks")
# Reactions
GET_REACTIONS: typing.Final[Route] = Route(GET, "/channels/{channel}/messages/{message}/reactions/{emoji}")
DELETE_ALL_REACTIONS: typing.Final[Route] = Route(DELETE, "/channels/{channel}/messages/{message}/reactions")
DELETE_REACTION_EMOJI: typing.Final[Route] = Route(DELETE, "/channels/{channel}/messages/{message}/reactions/{emoji}")
DELETE_REACTION_USER: typing.Final[Route] = Route(
DELETE, "/channels/{channel}/messages/{message}/reactions/{emoji}/{user}"
)
# Guilds
GET_GUILD: typing.Final[Route] = Route(GET, "/guilds/{guild}")
POST_GUILDS: typing.Final[Route] = Route(POST, "/guilds")
PATCH_GUILD: typing.Final[Route] = Route(PATCH, "/guilds/{guild}")
DELETE_GUILD: typing.Final[Route] = Route(DELETE, "/guilds/{guild}")
GET_GUILD_AUDIT_LOGS: typing.Final[Route] = Route(GET, "/guilds/{guild}/audit-logs")
GET_GUILD_BAN: typing.Final[Route] = Route(GET, "/guilds/{guild}/bans/{user}")
PUT_GUILD_BAN: typing.Final[Route] = Route(PUT, "/guilds/{guild}/bans/{user}")
DELETE_GUILD_BAN: typing.Final[Route] = Route(DELETE, "/guilds/{guild}/bans/{user}")
GET_GUILD_BANS: typing.Final[Route] = Route(GET, "/guilds/{guild}/bans")
GET_GUILD_CHANNELS: typing.Final[Route] = Route(GET, "/guilds/{guild}/channels")
POST_GUILD_CHANNELS: typing.Final[Route] = Route(POST, "/guilds/{guild}/channels")
PATCH_GUILD_CHANNELS: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/channels")
GET_GUILD_WIDGET: typing.Final[Route] = Route(GET, "/guilds/{guild}/widget")
PATCH_GUILD_WIDGET: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/widget")
GET_GUILD_WELCOME_SCREEN: typing.Final[Route] = Route(GET, "/guilds/{guild}/welcome-screen")
PATCH_GUILD_WELCOME_SCREEN: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/welcome-screen")
GET_GUILD_MEMBER_VERIFICATION: typing.Final[Route] = Route(GET, "/guilds/{guild}/member-verification")
PATCH_GUILD_MEMBER_VERIFICATION: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/member-verification")
GET_GUILD_EMOJI: typing.Final[Route] = Route(GET, "/guilds/{guild}/emojis/{emoji}")
PATCH_GUILD_EMOJI: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/emojis/{emoji}")
DELETE_GUILD_EMOJI: typing.Final[Route] = Route(DELETE, "/guilds/{guild}/emojis/{emoji}")
GET_GUILD_EMOJIS: typing.Final[Route] = Route(GET, "/guilds/{guild}/emojis")
POST_GUILD_EMOJIS: typing.Final[Route] = Route(POST, "/guilds/{guild}/emojis")
GET_GUILD_INTEGRATIONS: typing.Final[Route] = Route(GET, "/guilds/{guild}/integrations")
DELETE_GUILD_INTEGRATION: typing.Final[Route] = Route(DELETE, "/guilds/{guild}/integrations/{integration}")
GET_GUILD_INVITES: typing.Final[Route] = Route(GET, "/guilds/{guild}/invites")
GET_GUILD_MEMBER: typing.Final[Route] = Route(GET, "/guilds/{guild}/members/{user}")
PATCH_GUILD_MEMBER: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/members/{user}")
PUT_GUILD_MEMBER: typing.Final[Route] = Route(PUT, "/guilds/{guild}/members/{user}")
GET_GUILD_MEMBERS: typing.Final[Route] = Route(GET, "/guilds/{guild}/members")
DELETE_GUILD_MEMBER: typing.Final[Route] = Route(DELETE, "/guilds/{guild}/members/{user}")
GET_GUILD_MEMBERS_SEARCH: typing.Final[Route] = Route(GET, "/guilds/{guild}/members/search")
PUT_GUILD_MEMBER_ROLE: typing.Final[Route] = Route(PUT, "/guilds/{guild}/members/{user}/roles/{role}")
DELETE_GUILD_MEMBER_ROLE: typing.Final[Route] = Route(DELETE, "/guilds/{guild}/members/{user}/roles/{role}")
GET_GUILD_PREVIEW: typing.Final[Route] = Route(GET, "/guilds/{guild}/preview")
GET_GUILD_PRUNE: typing.Final[Route] = Route(GET, "/guilds/{guild}/prune")
POST_GUILD_PRUNE: typing.Final[Route] = Route(POST, "/guilds/{guild}/prune")
PATCH_GUILD_ROLE: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/roles/{role}")
DELETE_GUILD_ROLE: typing.Final[Route] = Route(DELETE, "/guilds/{guild}/roles/{role}")
GET_GUILD_ROLES: typing.Final[Route] = Route(GET, "/guilds/{guild}/roles")
POST_GUILD_ROLES: typing.Final[Route] = Route(POST, "/guilds/{guild}/roles")
PATCH_GUILD_ROLES: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/roles")
GET_GUILD_VANITY_URL: typing.Final[Route] = Route(GET, "/guilds/{guild}/vanity-url")
PATCH_GUILD_VOICE_STATE: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/voice-states/{user}")
PATCH_MY_GUILD_VOICE_STATE: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/voice-states/@me")
GET_GUILD_VOICE_REGIONS: typing.Final[Route] = Route(GET, "/guilds/{guild}/regions")
GET_GUILD_WEBHOOKS: typing.Final[Route] = Route(GET, "/guilds/{guild}/webhooks")
GET_GUILD_BANNER_IMAGE: typing.Final[Route] = Route(GET, "/guilds/{guild}/widget.png")
# Templates
DELETE_GUILD_TEMPLATE: typing.Final[Route] = Route(DELETE, "/guilds/{guild}/templates/{template}")
GET_TEMPLATE: typing.Final[Route] = Route(GET, "/guilds/templates/{template}")
GET_GUILD_TEMPLATES: typing.Final[Route] = Route(GET, "/guilds/{guild}/templates")
PATCH_GUILD_TEMPLATE: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/templates/{template}")
POST_GUILD_TEMPLATES: typing.Final[Route] = Route(POST, "/guilds/{guild}/templates")
POST_TEMPLATE: typing.Final[Route] = Route(POST, "/guilds/templates/{template}")
PUT_GUILD_TEMPLATE: typing.Final[Route] = Route(PUT, "/guilds/{guild}/templates/{template}")
# Invites
GET_INVITE: typing.Final[Route] = Route(GET, "/invites/{invite_code}")
DELETE_INVITE: typing.Final[Route] = Route(DELETE, "/invites/{invite_code}")
# Users
GET_USER: typing.Final[Route] = Route(GET, "/users/{user}")
# @me
POST_MY_CHANNELS: typing.Final[Route] = Route(POST, "/users/@me/channels")
GET_MY_CONNECTIONS: typing.Final[Route] = Route(GET, "/users/@me/connections") # OAuth2 only
DELETE_MY_GUILD: typing.Final[Route] = Route(DELETE, "/users/@me/guilds/{guild}")
GET_MY_GUILDS: typing.Final[Route] = Route(GET, "/users/@me/guilds")
PATCH_MY_GUILD_NICKNAME: typing.Final[Route] = Route(PATCH, "/guilds/{guild}/members/@me/nick")
GET_MY_USER: typing.Final[Route] = Route(GET, "/users/@me")
PATCH_MY_USER: typing.Final[Route] = Route(PATCH, "/users/@me")
PUT_MY_REACTION: typing.Final[Route] = Route(PUT, "/channels/{channel}/messages/{message}/reactions/{emoji}/@me")
DELETE_MY_REACTION: typing.Final[Route] = Route(DELETE, "/channels/{channel}/messages/{message}/reactions/{emoji}/@me")
# Voice
GET_VOICE_REGIONS: typing.Final[Route] = Route(GET, "/voice/regions")
# Webhooks
GET_WEBHOOK: typing.Final[Route] = Route(GET, "/webhooks/{webhook}")
PATCH_WEBHOOK: typing.Final[Route] = Route(PATCH, "/webhooks/{webhook}")
DELETE_WEBHOOK: typing.Final[Route] = Route(DELETE, "/webhooks/{webhook}")
GET_WEBHOOK_WITH_TOKEN: typing.Final[Route] = Route(GET, "/webhooks/{webhook}/{token}")
PATCH_WEBHOOK_WITH_TOKEN: typing.Final[Route] = Route(PATCH, "/webhooks/{webhook}/{token}")
DELETE_WEBHOOK_WITH_TOKEN: typing.Final[Route] = Route(DELETE, "/webhooks/{webhook}/{token}")
POST_WEBHOOK_WITH_TOKEN: typing.Final[Route] = Route(POST, "/webhooks/{webhook}/{token}")
POST_WEBHOOK_WITH_TOKEN_GITHUB: typing.Final[Route] = Route(POST, "/webhooks/{webhook}/{token}/github")
POST_WEBHOOK_WITH_TOKEN_SLACK: typing.Final[Route] = Route(POST, "/webhooks/{webhook}/{token}/slack")
GET_WEBHOOK_MESSAGE: typing.Final[Route] = Route(GET, "/webhooks/{webhook}/{token}/messages/{message}")
PATCH_WEBHOOK_MESSAGE: typing.Final[Route] = Route(PATCH, "/webhooks/{webhook}/{token}/messages/{message}")
DELETE_WEBHOOK_MESSAGE: typing.Final[Route] = Route(DELETE, "/webhooks/{webhook}/{token}/messages/{message}")
# Applications
GET_APPLICATION_COMMAND: typing.Final[Route] = Route(GET, "/applications/{application}/commands/{command}")
GET_APPLICATION_COMMANDS: typing.Final[Route] = Route(GET, "/applications/{application}/commands")
PATCH_APPLICATION_COMMAND: typing.Final[Route] = Route(PATCH, "/applications/{application}/commands/{command}")
POST_APPLICATION_COMMAND: typing.Final[Route] = Route(POST, "/applications/{application}/commands")
PUT_APPLICATION_COMMANDS: typing.Final[Route] = Route(PUT, "/applications/{application}/commands")
DELETE_APPLICATION_COMMAND: typing.Final[Route] = Route(DELETE, "/applications/{application}/commands/{command}")
GET_APPLICATION_GUILD_COMMAND: typing.Final[Route] = Route(
GET, "/applications/{application}/guilds/{guild}/commands/{command}"
)
GET_APPLICATION_GUILD_COMMANDS: typing.Final[Route] = Route(GET, "/applications/{application}/guilds/{guild}/commands")
PATCH_APPLICATION_GUILD_COMMAND: typing.Final[Route] = Route(
PATCH, "/applications/{application}/guilds/{guild}/commands/{command}"
)
POST_APPLICATION_GUILD_COMMAND: typing.Final[Route] = Route(POST, "/applications/{application}/guilds/{guild}/commands")
PUT_APPLICATION_GUILD_COMMANDS: typing.Final[Route] = Route(PUT, "/applications/{application}/guilds/{guild}/commands")
DELETE_APPLICATION_GUILD_COMMAND: typing.Final[Route] = Route(
DELETE, "/applications/{application}/guilds/{guild}/commands/{command}"
)
# Interactions
# For these endpoints "webhook" is the application ID.
GET_INTERACTION_RESPONSE: typing.Final[Route] = Route(GET, "/webhooks/{webhook}/{token}/messages/@original")
PATCH_INTERACTION_RESPONSE: typing.Final[Route] = Route(PATCH, "/webhooks/{webhook}/{token}/messages/@original")
POST_INTERACTION_RESPONSE: typing.Final[Route] = Route(POST, "/interactions/{interaction}/{token}/callback")
DELETE_INTERACTION_RESPONSE: typing.Final[Route] = Route(DELETE, "/webhooks/{webhook}/{token}/messages/@original")
# OAuth2 API
GET_MY_APPLICATION: typing.Final[Route] = Route(GET, "/oauth2/applications/@me")
GET_MY_AUTHORIZATION: typing.Final[Route] = Route(GET, "/oauth2/@me")
POST_AUTHORIZE: typing.Final[Route] = Route(POST, "/oauth2/authorize")
POST_TOKEN: typing.Final[Route] = Route(POST, "/oauth2/token")
POST_TOKEN_REVOKE: typing.Final[Route] = Route(POST, "/oauth2/token/revoke")
# Gateway
GET_GATEWAY: typing.Final[Route] = Route(GET, "/gateway")
GET_GATEWAY_BOT: typing.Final[Route] = Route(GET, "/gateway/bot")
PNG: typing.Final[str] = "png".casefold()
JPEG: typing.Final[str] = "jpeg".casefold()
WEBP: typing.Final[str] = "webp".casefold()
GIF: typing.Final[str] = "gif".casefold()
# CDN specific endpoints. These reside on a different server.
CDN_CUSTOM_EMOJI: typing.Final[CDNRoute] = CDNRoute("/emojis/{emoji_id}", {PNG, GIF})
CDN_GUILD_ICON: typing.Final[CDNRoute] = CDNRoute("/icons/{guild_id}/{hash}", {PNG, JPEG, WEBP, GIF})
CDN_GUILD_SPLASH: typing.Final[CDNRoute] = CDNRoute("/splashes/{guild_id}/{hash}", {PNG, JPEG, WEBP})
CDN_GUILD_DISCOVERY_SPLASH: typing.Final[CDNRoute] = CDNRoute(
"/discovery-splashes/{guild_id}/{hash}", {PNG, JPEG, WEBP}
)
CDN_GUILD_BANNER: typing.Final[CDNRoute] = CDNRoute("/banners/{guild_id}/{hash}", {PNG, JPEG, WEBP})
CDN_DEFAULT_USER_AVATAR: typing.Final[CDNRoute] = CDNRoute("/embed/avatars/{discriminator}", {PNG}, sizable=False)
CDN_USER_AVATAR: typing.Final[CDNRoute] = CDNRoute("/avatars/{user_id}/{hash}", {PNG, JPEG, WEBP, GIF})
CDN_APPLICATION_ICON: typing.Final[CDNRoute] = CDNRoute("/app-icons/{application_id}/{hash}", {PNG, JPEG, WEBP})
CDN_APPLICATION_COVER: typing.Final[CDNRoute] = CDNRoute("/app-assets/{application_id}/{hash}", {PNG, JPEG, WEBP})
CDN_ACHIEVEMENT_ICON: typing.Final[CDNRoute] = CDNRoute(
"/app-assets/{application_id}/achievements/{achievement_id}/icons/{hash}", {PNG, JPEG, WEBP}
)
CDN_TEAM_ICON: typing.Final[CDNRoute] = CDNRoute("/team-icons/{team_id}/{hash}", {PNG, JPEG, WEBP})
# undocumented on the Discord docs.
CDN_CHANNEL_ICON: typing.Final[CDNRoute] = CDNRoute("/channel-icons/{channel_id}/{hash}", {PNG, JPEG, WEBP})
|
py | b40a80c949d0deb83e0a4568b7150bba10900b9c | # -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.41.100'
# -----------------------------------------------------------------------------
from ccxt.async_support.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import TICK_SIZE # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadSymbol # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RateLimitExceeded # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import OnMaintenance # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import error_hierarchy # noqa: F401
from ccxt.async_support.aax import aax # noqa: F401
from ccxt.async_support.acx import acx # noqa: F401
from ccxt.async_support.aofex import aofex # noqa: F401
from ccxt.async_support.bequant import bequant # noqa: F401
from ccxt.async_support.bibox import bibox # noqa: F401
from ccxt.async_support.bigone import bigone # noqa: F401
from ccxt.async_support.binance import binance # noqa: F401
from ccxt.async_support.binanceus import binanceus # noqa: F401
from ccxt.async_support.bit2c import bit2c # noqa: F401
from ccxt.async_support.bitbank import bitbank # noqa: F401
from ccxt.async_support.bitbay import bitbay # noqa: F401
from ccxt.async_support.bitcoincom import bitcoincom # noqa: F401
from ccxt.async_support.bitfinex import bitfinex # noqa: F401
from ccxt.async_support.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async_support.bitflyer import bitflyer # noqa: F401
from ccxt.async_support.bitforex import bitforex # noqa: F401
from ccxt.async_support.bitget import bitget # noqa: F401
from ccxt.async_support.bithumb import bithumb # noqa: F401
from ccxt.async_support.bitkk import bitkk # noqa: F401
from ccxt.async_support.bitmart import bitmart # noqa: F401
from ccxt.async_support.bitmax import bitmax # noqa: F401
from ccxt.async_support.bitmex import bitmex # noqa: F401
from ccxt.async_support.bitpanda import bitpanda # noqa: F401
from ccxt.async_support.bitso import bitso # noqa: F401
from ccxt.async_support.bitstamp import bitstamp # noqa: F401
from ccxt.async_support.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async_support.bittrex import bittrex # noqa: F401
from ccxt.async_support.bitvavo import bitvavo # noqa: F401
from ccxt.async_support.bitz import bitz # noqa: F401
from ccxt.async_support.bl3p import bl3p # noqa: F401
from ccxt.async_support.bleutrade import bleutrade # noqa: F401
from ccxt.async_support.braziliex import braziliex # noqa: F401
from ccxt.async_support.btcalpha import btcalpha # noqa: F401
from ccxt.async_support.btcbox import btcbox # noqa: F401
from ccxt.async_support.btcmarkets import btcmarkets # noqa: F401
from ccxt.async_support.btctradeua import btctradeua # noqa: F401
from ccxt.async_support.btcturk import btcturk # noqa: F401
from ccxt.async_support.buda import buda # noqa: F401
from ccxt.async_support.bw import bw # noqa: F401
from ccxt.async_support.bybit import bybit # noqa: F401
from ccxt.async_support.bytetrade import bytetrade # noqa: F401
from ccxt.async_support.cdax import cdax # noqa: F401
from ccxt.async_support.cex import cex # noqa: F401
from ccxt.async_support.chilebit import chilebit # noqa: F401
from ccxt.async_support.coinbase import coinbase # noqa: F401
from ccxt.async_support.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.async_support.coinbasepro import coinbasepro # noqa: F401
from ccxt.async_support.coincheck import coincheck # noqa: F401
from ccxt.async_support.coinegg import coinegg # noqa: F401
from ccxt.async_support.coinex import coinex # noqa: F401
from ccxt.async_support.coinfalcon import coinfalcon # noqa: F401
from ccxt.async_support.coinfloor import coinfloor # noqa: F401
from ccxt.async_support.coingi import coingi # noqa: F401
from ccxt.async_support.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async_support.coinmate import coinmate # noqa: F401
from ccxt.async_support.coinone import coinone # noqa: F401
from ccxt.async_support.coinspot import coinspot # noqa: F401
from ccxt.async_support.crex24 import crex24 # noqa: F401
from ccxt.async_support.currencycom import currencycom # noqa: F401
from ccxt.async_support.delta import delta # noqa: F401
from ccxt.async_support.deribit import deribit # noqa: F401
from ccxt.async_support.digifinex import digifinex # noqa: F401
from ccxt.async_support.dsx import dsx # noqa: F401
from ccxt.async_support.eterbase import eterbase # noqa: F401
from ccxt.async_support.exmo import exmo # noqa: F401
from ccxt.async_support.exx import exx # noqa: F401
from ccxt.async_support.fcoin import fcoin # noqa: F401
from ccxt.async_support.fcoinjp import fcoinjp # noqa: F401
from ccxt.async_support.flowbtc import flowbtc # noqa: F401
from ccxt.async_support.foxbit import foxbit # noqa: F401
from ccxt.async_support.ftx import ftx # noqa: F401
from ccxt.async_support.gateio import gateio # noqa: F401
from ccxt.async_support.gemini import gemini # noqa: F401
from ccxt.async_support.gopax import gopax # noqa: F401
from ccxt.async_support.hbtc import hbtc # noqa: F401
from ccxt.async_support.hitbtc import hitbtc # noqa: F401
from ccxt.async_support.hollaex import hollaex # noqa: F401
from ccxt.async_support.huobijp import huobijp # noqa: F401
from ccxt.async_support.huobipro import huobipro # noqa: F401
from ccxt.async_support.ice3x import ice3x # noqa: F401
from ccxt.async_support.idex import idex # noqa: F401
from ccxt.async_support.independentreserve import independentreserve # noqa: F401
from ccxt.async_support.indodax import indodax # noqa: F401
from ccxt.async_support.itbit import itbit # noqa: F401
from ccxt.async_support.kraken import kraken # noqa: F401
from ccxt.async_support.kucoin import kucoin # noqa: F401
from ccxt.async_support.kuna import kuna # noqa: F401
from ccxt.async_support.lakebtc import lakebtc # noqa: F401
from ccxt.async_support.latoken import latoken # noqa: F401
from ccxt.async_support.lbank import lbank # noqa: F401
from ccxt.async_support.liquid import liquid # noqa: F401
from ccxt.async_support.luno import luno # noqa: F401
from ccxt.async_support.lykke import lykke # noqa: F401
from ccxt.async_support.mercado import mercado # noqa: F401
from ccxt.async_support.mixcoins import mixcoins # noqa: F401
from ccxt.async_support.novadax import novadax # noqa: F401
from ccxt.async_support.oceanex import oceanex # noqa: F401
from ccxt.async_support.okcoin import okcoin # noqa: F401
from ccxt.async_support.okex import okex # noqa: F401
from ccxt.async_support.paymium import paymium # noqa: F401
from ccxt.async_support.phemex import phemex # noqa: F401
from ccxt.async_support.poloniex import poloniex # noqa: F401
from ccxt.async_support.probit import probit # noqa: F401
from ccxt.async_support.qtrade import qtrade # noqa: F401
from ccxt.async_support.rightbtc import rightbtc # noqa: F401
from ccxt.async_support.ripio import ripio # noqa: F401
from ccxt.async_support.southxchange import southxchange # noqa: F401
from ccxt.async_support.stex import stex # noqa: F401
from ccxt.async_support.surbitcoin import surbitcoin # noqa: F401
from ccxt.async_support.therock import therock # noqa: F401
from ccxt.async_support.tidebit import tidebit # noqa: F401
from ccxt.async_support.tidex import tidex # noqa: F401
from ccxt.async_support.timex import timex # noqa: F401
from ccxt.async_support.upbit import upbit # noqa: F401
from ccxt.async_support.vaultoro import vaultoro # noqa: F401
from ccxt.async_support.vbtc import vbtc # noqa: F401
from ccxt.async_support.vcc import vcc # noqa: F401
from ccxt.async_support.wavesexchange import wavesexchange # noqa: F401
from ccxt.async_support.whitebit import whitebit # noqa: F401
from ccxt.async_support.xbtce import xbtce # noqa: F401
from ccxt.async_support.xena import xena # noqa: F401
from ccxt.async_support.yobit import yobit # noqa: F401
from ccxt.async_support.zaif import zaif # noqa: F401
from ccxt.async_support.zb import zb # noqa: F401
exchanges = [
'aax',
'acx',
'aofex',
'bequant',
'bibox',
'bigone',
'binance',
'binanceus',
'bit2c',
'bitbank',
'bitbay',
'bitcoincom',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bitget',
'bithumb',
'bitkk',
'bitmart',
'bitmax',
'bitmex',
'bitpanda',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitvavo',
'bitz',
'bl3p',
'bleutrade',
'braziliex',
'btcalpha',
'btcbox',
'btcmarkets',
'btctradeua',
'btcturk',
'buda',
'bw',
'bybit',
'bytetrade',
'cdax',
'cex',
'chilebit',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinegg',
'coinex',
'coinfalcon',
'coinfloor',
'coingi',
'coinmarketcap',
'coinmate',
'coinone',
'coinspot',
'crex24',
'currencycom',
'delta',
'deribit',
'digifinex',
'dsx',
'eterbase',
'exmo',
'exx',
'fcoin',
'fcoinjp',
'flowbtc',
'foxbit',
'ftx',
'gateio',
'gemini',
'gopax',
'hbtc',
'hitbtc',
'hollaex',
'huobijp',
'huobipro',
'ice3x',
'idex',
'independentreserve',
'indodax',
'itbit',
'kraken',
'kucoin',
'kuna',
'lakebtc',
'latoken',
'lbank',
'liquid',
'luno',
'lykke',
'mercado',
'mixcoins',
'novadax',
'oceanex',
'okcoin',
'okex',
'paymium',
'phemex',
'poloniex',
'probit',
'qtrade',
'rightbtc',
'ripio',
'southxchange',
'stex',
'surbitcoin',
'therock',
'tidebit',
'tidex',
'timex',
'upbit',
'vaultoro',
'vbtc',
'vcc',
'wavesexchange',
'whitebit',
'xbtce',
'xena',
'yobit',
'zaif',
'zb',
]
base = [
'Exchange',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
|
py | b40a82670ea346149aaf3640009134ba1053fa90 | # Copyright (C) 2018 Alpha Griffin
# @%@~LICENSE~@%@
from . import config, TokenError
from bitcash.format import public_key_to_address
from os import path
import sqlite3
from hashlib import sha256
class TokenDB:
def __init__(self, auto_commit=True):
isolation = 'EXCLUSIVE' if not auto_commit else None
conn = sqlite3.connect(path.join(config.dir, 'tokens.db'), isolation_level=isolation)
conn.execute('''CREATE TABLE IF NOT EXISTS status (
key TEXT NOT NULL PRIMARY KEY,
value TEXT
)''')
#
# Block and tx data
#
conn.execute('''CREATE TABLE IF NOT EXISTS block (
hash TEXT NOT NULL PRIMARY KEY,
height INTEGER NOT NULL,
orbit BLOB
)''')
conn.execute('''CREATE INDEX IF NOT EXISTS idx_block_height ON block (height)''')
conn.execute('''CREATE TABLE IF NOT EXISTS tx (
hash TEXT NOT NULL PRIMARY KEY,
block INTEGER NOT NULL,
confirmations INTEGER NOT NULL
)''')
conn.execute('''CREATE INDEX IF NOT EXISTS idx_tx_block ON tx (block)''')
conn.execute('''CREATE TABLE IF NOT EXISTS txin (
hash TEXT NOT NULL PRIMARY KEY,
tx INTEGER NOT NULL,
asmhex TEXT NOT NULL
)''')
conn.execute('''CREATE INDEX IF NOT EXISTS idx_txin_tx ON txin (tx)''')
conn.execute('''CREATE TABLE IF NOT EXISTS txout (
tx INTEGER NOT NULL,
value INTEGER NOT NULL,
type TEXT NOT NULL,
addresses TEXT,
asmhex TEXT NOT NULL
)''')
conn.execute('''CREATE INDEX IF NOT EXISTS idx_txout_tx ON txout (tx)''')
#
# Tokens and balances
#
conn.execute('''CREATE TABLE IF NOT EXISTS token (
address TEXT NOT NULL PRIMARY KEY,
tx INTEGER NOT NULL,
created INTEGER NOT NULL,
updated INTEGER NOT NULL,
supply INTEGER NOT NULL,
decimals INTEGER NOT NULL,
symbol TEXT NOT NULL,
name TEXT,
main_uri TEXT,
image_uri TEXT
)''')
conn.execute('''CREATE INDEX IF NOT EXISTS idx_token_symbol ON token (symbol)''')
conn.execute('''CREATE INDEX IF NOT EXISTS idx_token_updated ON token (updated)''')
conn.execute('''CREATE TABLE IF NOT EXISTS balance (
address TEXT NOT NULL,
token INTEGER NOT NULL,
updated INTEGER NOT NULL,
units INTEGER NOT NULL,
available INTEGER NOT NULL,
PRIMARY KEY (address, token)
)''')
conn.execute('''CREATE INDEX IF NOT EXISTS idx_balance_updated ON balance (updated)''')
#
# Events
# TODO: will need to remove primary key from tx if we ever support multiple operations in one transaction
#
conn.execute('''CREATE TABLE IF NOT EXISTS transfer (
tx INTEGER NOT NULL PRIMARY KEY,
created INTEGER NOT NULL,
addr_from TEXT NOT NULL,
addr_to TEXT NOT NULL,
units INTEGER
)''')
conn.execute('''CREATE INDEX IF NOT EXISTS idx_transfer_created ON transfer (created)''')
conn.execute('''CREATE TABLE IF NOT EXISTS advertisement (
tx INTEGER NOT NULL PRIMARY KEY,
token INTEGER NOT NULL,
created INTEGER NOT NULL,
updated INTEGER NOT NULL,
finished INTEGER,
begins INTEGER NOT NULL,
ends INTEGER,
delivers INTEGER NOT NULL,
available INTEGER NOT NULL,
claimed INTEGER NOT NULL,
rate INTEGER,
minimum INTEGER NOT NULL,
maximum INTEGER NOT NULL,
preregister TEXT NULL
)''')
conn.execute('''CREATE TABLE IF NOT EXISTS registration (
tx INTEGER NOT NULL PRIMARY KEY,
address TEXT NOT NULL,
advertisement INTEGER NOT NULL,
created INTEGER NOT NULL,
updated INTEGER NOT NULL,
finished INTEGER,
maximum INTEGER NOT NULL,
payments INTEGER NOT NULL,
claimed INTEGER NOT NULL
)''')
keys = conn.execute('''SELECT key FROM status''').fetchall()
self._init_status(conn, keys, 'height')
conn.commit()
self.conn = conn
@classmethod
def _init_status(self, conn, keys, key):
for k in keys:
if k[0] == key:
return
conn.execute('''INSERT INTO status (key) VALUES (?)''', (key,))
def commit(self):
self.conn.commit()
def close(self):
self.conn.close()
def _set_status(self, key, value):
self.conn.execute('''UPDATE status SET value = ? WHERE key = ?''', (value, key))
def _get_status(self, key):
return self.conn.execute('''SELECT value FROM status WHERE key = ?''', (key,)).fetchone()[0]
def get_last_block(self):
height = self._get_status('height')
if height is None:
return None
return int(height)
def set_last_block(self, height):
self._set_status('height', height)
def save_block(self, blockhash, height):
cursor = self.conn.cursor()
cursor.execute('''INSERT INTO block
(hash, height)
VALUES (?, ?)''',
(blockhash, height))
return cursor.lastrowid
def save_tx(self, txhash, block, confirmations):
cursor = self.conn.cursor()
cursor.execute('''INSERT INTO tx
(hash, block, confirmations)
VALUES (?, ?, ?)''',
(txhash, block, confirmations))
return cursor.lastrowid
def save_txin(self, txhash, tx, asmhex):
cursor = self.conn.cursor()
cursor.execute('''INSERT INTO txin
(hash, tx, asmhex)
VALUES (?, ?, ?)''',
(txhash, tx, asmhex))
return cursor.lastrowid
def save_txout(self, tx, value, stype, addresses, asmhex):
cursor = self.conn.cursor()
cursor.execute('''INSERT INTO txout
(tx, value, type, addresses, asmhex)
VALUES (?, ?, ?, ?, ?)''',
(tx, value, stype, addresses, asmhex))
return cursor.lastrowid
def get_signer_address(self, txrow):
txins = self.conn.execute('''SELECT asmhex FROM txin WHERE tx = ?''', (txrow,)).fetchall()
address = None
for txin in txins:
asmhex = txin[0]
asm = bytes.fromhex(asmhex)
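            # Standard P2PKH scriptSig layout: <push-len><signature><push-len><pubkey>.
            # The first byte is the signature length; the byte right after the
            # signature is the public key length, which lets us slice out the pubkey.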
sig_size = int.from_bytes(asm[0:1], 'little')
pubkey_size = int.from_bytes(asm[sig_size+1:sig_size+2], 'little')
pubkey = asm[sig_size + 2 : sig_size + pubkey_size + 2]
pubkey_address = public_key_to_address(pubkey)
if not address:
address = pubkey_address
elif address != pubkey_address:
raise ValueError("Multiple signer keys are present in the transaction inputs")
return address
def token_create(self, address, tx, block, supply, decimals, symbol, name=None, main_uri=None, image_uri=None):
cursor = self.conn.cursor()
try:
cursor.execute('''INSERT INTO token
(address, tx, created, updated, supply, decimals, symbol, name, main_uri, image_uri)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(address, tx, block, block, supply, decimals, symbol, name, main_uri, image_uri))
tokenrow = cursor.lastrowid
except sqlite3.IntegrityError as e:
raise TokenError("A token is already defined at this address: {}".format(e))
# no try/except here... it's a critical error to be able to insert a token yet already have a blance for it
cursor.execute('''INSERT INTO balance
(address, token, updated, units, available)
VALUES (?, ?, ?, ?, ?)''',
(address, tokenrow, block, supply, supply))
return tokenrow
def _get_tokenrow(self, cursor, address):
token = cursor.execute('''SELECT rowid FROM token WHERE address = ?''', (address,)).fetchone()
if token is None:
raise TokenError("No token defined at the specified address")
return token[0]
def _get_balance(self, cursor, tokenrow, address, total=False):
balance = cursor.execute('''SELECT units, available FROM balance
WHERE token = ? AND address = ?''',
(tokenrow, address)).fetchone()
if not balance:
return None
return balance[0] if total else balance[1]
def token_transfer(self, address, txrow, blockrow, from_address, to_address, units):
cursor = self.conn.cursor()
if from_address == to_address:
raise TokenError("Transfer to address must be different than transfer from address")
tokenrow = self._get_tokenrow(cursor, address)
# validate source balance
balance = self._get_balance(cursor, tokenrow, from_address)
if balance is None:
raise TokenError("No balance for this token")
if balance < units:
raise TokenError("Insufficient available balance for this transfer")
# update source balance
cursor.execute('''UPDATE balance
SET updated = ?, units = units - ?, available = available - ?
WHERE token = ? AND address = ?''',
(blockrow, units, units, tokenrow, from_address))
# update destination balance
balance = self._get_balance(cursor, tokenrow, to_address)
if balance is None:
cursor.execute('''INSERT INTO balance
(address, token, updated, units, available)
VALUES (?, ?, ?, ?, ?)''',
(to_address, tokenrow, blockrow, units, units))
else:
cursor.execute('''UPDATE balance
SET updated = ?, units = units + ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, units, units, tokenrow, to_address))
# save transfer event
cursor.execute('''INSERT INTO transfer
(tx, created, addr_from, addr_to, units)
VALUES (?, ?, ?, ?, ?)''',
(txrow, blockrow, from_address, to_address, units))
return cursor.lastrowid
def token_advertise(self, address, txrow, blockrow, exchange_rate=None, units_avail=None, units_min=None, units_max=None,
block_begin=None, block_end=None, block_deliver=None, preregister=False):
cursor = self.conn.cursor()
tokenrow = self._get_tokenrow(cursor, address)
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
# block validation
if block_begin:
if block_begin <= height:
raise TokenError("Beginning block must occur after the advertisement block")
else:
block_begin = height + 1
if block_end:
if block_end < block_begin:
raise TokenError("Ending block must be on or after the beginning block")
if block_deliver:
if block_deliver < block_begin:
raise TokenError("Delivery block must be on or after the beginning block")
else:
block_deliver = block_begin
# existing advertisement checks
advertisement = cursor.execute('''SELECT 1 FROM advertisement
WHERE token = ? AND finished IS NULL AND begins <= ?
AND (ends IS NULL OR ends >= ?)
LIMIT 1''',
(tokenrow, block_begin, block_begin)).fetchone()
if advertisement:
raise TokenError("An existing advertisement is currently open")
advertisement = cursor.execute('''SELECT begins FROM advertisement
WHERE token = ? AND finished IS NULL AND begins > ?
ORDER BY begins LIMIT 1''',
                                       (tokenrow, block_begin)).fetchone()
if advertisement:
if block_end:
if block_end >= advertisement[0]:
raise TokenError("An existing advertisement exists that begins before this one ends")
else:
raise TokenError("An existing advertisement begins in the future but this one has no ending block")
advertisement = cursor.execute("""SELECT 1 FROM advertisement
WHERE token = ? AND finished IS NULL AND begins > ? AND preregister = 'Y'""",
                                       (tokenrow, block_begin)).fetchone()
if advertisement:
raise TokenError("An existing future advertisement allows preregistration")
# available balance validation and update
        balance = self._get_balance(cursor, tokenrow, address)
if units_avail:
if balance < units_avail:
raise TokenError("Insufficient available balance to make available")
else:
units_avail = balance
if units_min:
if units_min > units_avail:
raise TokenError("Insufficient available balance for the specified minimum units")
else:
units_min = 1
if units_max:
# note that it's not an error if units_max > units_avail... this allows a per-user maximum to be
# set when units_avail might not be specified
pass
else:
units_max = units_avail
cursor.execute('''UPDATE balance
SET updated = ?, available = available - ?
WHERE token = ? AND address = ?''',
(blockrow, units_avail, tokenrow, address))
# save advertise event
        cursor.execute('''INSERT INTO advertisement
                          (tx, token, created, updated, begins, ends, delivers, available, claimed,
                           rate, minimum, maximum, preregister)
                          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
                       (txrow, tokenrow, blockrow, blockrow, block_begin, block_end, block_deliver,
                        units_avail, 0, exchange_rate, units_min, units_max,
                        'Y' if preregister else None))
return cursor.lastrowid
def token_advertise_cancel(self, address, txrow, blockrow, txhash):
cursor = self.conn.cursor()
tokenrow = self._get_tokenrow(cursor, address)
# validate advertisement
advertisement = cursor.execute('''SELECT a.rowid, a.token, a.finished, a.available, a.claimed
FROM tx
LEFT JOIN advertisement a ON a.tx = tx.rowid
WHERE tx.hash = ?''',
(txhash,)).fetchone()
if not advertisement:
raise TokenError("No advertisement exists for the given tx hash")
if tokenrow != advertisement[1]:
raise TokenError("Advertisement at the specified tx hash does not match the token indicated")
if advertisement[2] is not None:
raise TokenError("The advertisement has already finished")
# validate registrations
registrations = cursor.execute('''SELECT 1 FROM registration
WHERE advertisement = ?
LIMIT 1''',
                                       (advertisement[0],)).fetchone()
if registrations:
#FIXME: just check that 'claimed' == 0 instead?
raise TokenError("There have already been registrations for this advertisement; it cannot be cancelled")
if advertisement[4] != 0:
raise ValueError("This advertisement indicates claims but no registrations were found")
# close advertisement and make balance available again
cursor.execute('''UPDATE advertisement
SET updated = ?, finished = ?
WHERE rowid = ?''',
(blockrow, blockrow, advertisement[0]))
cursor.execute('''UPDATE balance
SET updated = ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, advertisement[3], tokenrow, address))
return advertisement[0]
def get_eligible_advertisement_row(self, cursor, tokenrow, height):
advertisement = None
advertisements = cursor.execute('''SELECT rowid FROM advertisement
WHERE token = ? AND finished IS NULL AND begins <= ?
AND (ends IS NULL OR ends >= ?)''',
(tokenrow, height, height)).fetchall()
if advertisements:
if len(advertisements) > 1:
raise ValueError("There are multiple active advertisements")
advertisement = advertisements[0]
advertisements = cursor.execute("""SELECT rowid FROM advertisement
WHERE token = ? AND finished IS NULL AND begins > ? AND preregister = 'Y'""",
(tokenrow, height)).fetchall()
if advertisements:
if advertisement:
raise ValueError("There is an active advertisement but also a future advertisement allowing preregistration")
if len(advertisements) > 1:
raise ValueError("There are multiple future advertisements allowing preregistration")
advertisement = advertisements[0]
if not advertisement:
raise TokenError("There is no active advertisement or future advertisement allowing preregistration")
return advertisement[0]
def token_register(self, address, txrow, blockrow, user_address, units_max=None):
cursor = self.conn.cursor()
tokenrow = self._get_tokenrow(cursor, address)
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
advertisement = self.get_eligible_advertisement_row(cursor, tokenrow, height)
advertisement = cursor.execute('''SELECT rowid, minimum, maximum, rate, available, claimed, delivers
FROM advertisement
WHERE rowid = ?''',
(advertisement,)).fetchone()
        if units_max is None:
            units_max = advertisement[2]
        if units_max < advertisement[1]:
            raise TokenError('Specified maximum is less than the advertisement user-minimum required')
registrations = cursor.execute('''SELECT SUM(maximum)
FROM registration
WHERE address = ? and advertisement = ?''',
(user_address, advertisement[0])).fetchone()
max_remains = advertisement[2]
if registrations:
max_remains -= registrations[0]
if max_remains < 1:
raise TokenError('Maximum per-user units has already been registered')
unclaimed = advertisement[4] - advertisement[5]
if unclaimed < max_remains:
max_remains = unclaimed
if units_max > max_remains:
units_max = max_remains
if not advertisement[3]: # free faucet
units = units_max
available = (height > advertisement[6])
# note that if height == delivers then process_advertisements() will make the units available
# update source balance
cursor.execute('''UPDATE balance
SET updated = ?, units = units - ?
WHERE token = ? AND address = ?''',
(blockrow, units, tokenrow, address))
# update destination balance
balance = self._get_balance(cursor, tokenrow, user_address)
if balance is None:
cursor.execute('''INSERT INTO balance
(address, token, updated, units, available)
VALUES (?, ?, ?, ?, ?)''',
(user_address, tokenrow, blockrow, units, units if available else 0))
else:
cursor.execute('''UPDATE balance
SET updated = ?, units = units + ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, units, units if available else 0, tokenrow, user_address))
cursor.execute('''UPDATE advertisement
SET updated = ?, claimed = claimed + ?
WHERE rowid = ?''',
(blockrow, units, advertisement[0]))
# save transfer event
cursor.execute('''INSERT INTO transfer
(tx, created, addr_from, addr_to, units)
VALUES (?, ?, ?, ?, ?)''',
(txrow, blockrow, address, user_address, units))
else:
units = 0
cursor.execute('''INSERT INTO registration
(tx, address, advertisement, created, updated, finished, maximum, payments, claimed)
                          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(txrow, user_address, advertisement[0], blockrow, blockrow,
                        None if advertisement[3] else blockrow, units_max, 0, units))
return cursor.lastrowid
def token_unregister(self, address, txrow, blockrow, user_address):
cursor = self.conn.cursor()
tokenrow = self._get_tokenrow(cursor, address)
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
advertisement = self.get_eligible_advertisement_row(cursor, tokenrow, height)
        registrations = cursor.execute('''SELECT r.rowid, a.token
                                          FROM registration r
                                          LEFT JOIN advertisement a ON a.rowid = r.advertisement
                                          WHERE r.address = ? AND r.advertisement = ? AND r.finished IS NULL''',
(user_address, advertisement)).fetchall()
if not registrations:
raise TokenError("No active registration was found")
if len(registrations) > 1:
raise ValueError("Multiple active registrations found")
registration = registrations[0]
if registration[1] != tokenrow:
raise ValueError("This registration token does not match the advertisement token")
cursor.execute('''UPDATE registration
SET updated = ?, finished = ?
WHERE rowid = ?''',
(blockrow, blockrow, registration[0]))
return registration[0]
def get_active_registrations_map(self, blockrow):
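        # Builds a nested map of {token_address: {user_address: registration_rowid}}
        # covering every registration that is still open and whose advertisement
        # has already begun at this block height.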
cursor = self.conn.cursor()
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
registrations = cursor.execute('''SELECT t.address, r.address, r.rowid
FROM registration r
LEFT JOIN advertisement a ON a.rowid = r.advertisement
LEFT JOIN token t ON t.rowid = a.token
WHERE r.finished IS NULL AND a.finished IS NULL AND a.begins <= ?''',
(height,)).fetchall()
reg_map = {}
if registrations:
for registration in registrations:
try:
records = reg_map[registration[0]]
                except KeyError:
records = {}
reg_map[registration[0]] = records
try:
rowid = records[registration[1]]
raise ValueError('Already have an active registration for this user and token')
                except KeyError:
records[registration[1]] = registration[2]
return reg_map
def registration_payment(self, txrow, blockrow, rowid, value):
cursor = self.conn.cursor()
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
details = cursor.execute('''SELECT r.address, r.maximum, r.payments, r.claimed,
a.rowid, a.delivers, a.available, a.claimed, a.rate, a.minimum, a.maximum,
t.rowid, t.address
FROM registration r
LEFT JOIN advertisement a ON a.rowid = r.advertisement
LEFT JOIN token t ON t.rowid = a.token
WHERE r.rowid = ?''',
(rowid,)).fetchone()
claimed = cursor.execute('''SELECT SUM(claimed)
FROM registration
WHERE address = ? AND advertisement = ? AND rowid <> ?''',
                                  (details[0], details[4], rowid)).fetchone()[0] or 0
ad_remaining = details[6] - details[7]
user_remaining = details[10] - claimed - details[3]
if ad_remaining < user_remaining:
user_remaining = ad_remaining
if details[1] < user_remaining:
user_remaining = details[1]
payments = details[2] + value
rate = details[8]
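        # Rate semantics as used below: a positive rate grants that many token
        # units per payment unit, while a negative rate means that many payment
        # units are required to earn a single token unit.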
if rate:
if rate < 0:
units = payments // (-1 * rate)
else:
units = payments * rate
if units < details[9]:
units = 0
else:
units -= details[3]
if units > user_remaining:
units = user_remaining
else:
units = user_remaining
if units > 0:
available = (height > details[5])
# note that if height == delivers then process_advertisements() will make the units available
# update source balance
cursor.execute('''UPDATE balance
SET updated = ?, units = units - ?
WHERE token = ? AND address = ?''',
(blockrow, units, details[11], details[12]))
# update destination balance
balance = self._get_balance(cursor, details[11], details[0])
if balance is None:
cursor.execute('''INSERT INTO balance
(address, token, updated, units, available)
VALUES (?, ?, ?, ?, ?)''',
(details[0], details[11], blockrow, units, units if available else 0))
else:
cursor.execute('''UPDATE balance
SET updated = ?, units = units + ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, units, units if available else 0, details[11], details[0]))
# save transfer event
cursor.execute('''INSERT INTO transfer
(tx, created, addr_from, addr_to, units)
VALUES (?, ?, ?, ?, ?)''',
(txrow, blockrow, details[12], details[0], units))
finished = (units == (details[1] - details[3]))
cursor.execute('''UPDATE registration
SET updated = ?, finished = ?, payments = ?, claimed = claimed + ?
WHERE rowid = ?''',
(blockrow, blockrow if finished else None, payments, units, rowid))
cursor.execute('''UPDATE advertisement
SET updated = ?, claimed = claimed + ?
WHERE rowid = ?''',
(blockrow, units, details[4]))
def process_advertisements(self, blockrow):
cursor = self.conn.cursor()
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
deliveries = cursor.execute('''SELECT rowid, token
FROM advertisement
WHERE delivers = ?''',
(blockrow,)).fetchall()
if deliveries:
for delivery in deliveries:
registrations = cursor.execute('''SELECT rowid, address, claimed
FROM registration
WHERE advertisement = ?
ORDER BY address''',
(delivery[0],)).fetchall()
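                # rows are ordered by address, so claimed units can be accumulated
                # per user and credited to that balance once per address change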
if registrations:
last_address = None
user_claimed = 0
for registration in registrations:
if last_address is None:
last_address = registration[1]
if last_address == registration[1]:
user_claimed += registration[2]
else:
cursor.execute('''UPDATE balance
SET updated = ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, user_claimed, delivery[1], last_address))
last_address = registration[1]
user_claimed = registration[2]
cursor.execute('''UPDATE balance
SET updated = ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, user_claimed, delivery[1], last_address))
ads_to_close = cursor.execute('''SELECT a.rowid, a.available - a.claimed,
t.rowid, t.address
FROM advertisement a
LEFT JOIN token t ON t.rowid = a.token
                                         WHERE a.finished IS NULL AND (a.claimed = a.available OR a.ends = ?)''',
(height,)).fetchall()
if ads_to_close:
for advertisement in ads_to_close:
cursor.execute('''UPDATE registration
SET updated = ?, finished = ?
WHERE advertisement = ? AND finished IS NULL''',
(blockrow, blockrow, advertisement[0]))
cursor.execute('''UPDATE advertisement
SET updated = ?, finished = ?
WHERE rowid = ?''',
(blockrow, blockrow, advertisement[0]))
make_available = advertisement[1]
if make_available:
cursor.execute('''UPDATE balance
SET updated = ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, make_available, advertisement[2], advertisement[3]))
def get_user_tokens(self, address):
return [{
"address": row[0],
"symbol": row[1],
"decimals": row[2],
"name": row[3],
"units": row[4],
"available": row[5]
} for row in self.conn.execute('''
SELECT t.address, t.symbol, t.decimals, t.name, b.units, b.available
FROM balance b
LEFT JOIN token t ON t.rowid = b.token
WHERE b.address = ?''',
(address,)).fetchall()]
def hash(self, blockrow):
cursor = self.conn.cursor()
blocks = cursor.execute('''SELECT height, rowid, hash FROM block
WHERE orbit IS NULL
ORDER BY height''').fetchall()
if not blocks:
return None
if len(blocks) > 1:
            raise ValueError('Multiple unhashed blocks detected... hash() must be called after each block is inserted')
block = blocks[0]
height = block[0]
blockrow = block[1]
block_prev = cursor.execute('''SELECT orbit FROM block
WHERE height = ?''',
(height - 1,)).fetchone()
# hash the block and append to previous block hash
if block_prev:
data = block_prev[0]
else:
not_launch = cursor.execute('''SELECT 1 FROM block
WHERE height < ? LIMIT 1''',
(height,)).fetchone()
if not_launch:
raise ValueError('Missing block: {}'.format(height - 1))
data = b'\x42\x81' # special sequence to indicate launch
data += self._hash_cols(block)
# tokens and balances
data += self._hash_rows(cursor.execute('''SELECT * FROM token
WHERE updated = ?
ORDER BY rowid''',
(blockrow,)))
data += self._hash_rows(cursor.execute('''SELECT * FROM balance
WHERE updated = ?
ORDER BY rowid''',
(blockrow,)))
# events
data += self._hash_rows(cursor.execute('''SELECT * FROM transfer
WHERE created = ?
ORDER BY rowid''',
(blockrow,)))
data += self._hash_rows(cursor.execute('''SELECT * FROM advertisement
WHERE updated = ?
ORDER BY rowid''',
(blockrow,)))
data += self._hash_rows(cursor.execute('''SELECT * FROM registration
WHERE updated = ?
ORDER BY rowid''',
(blockrow,)))
# final hash and save
orbit = self._hash(data)
cursor.execute('''UPDATE block
SET orbit = ?
WHERE rowid = ?''',
(sqlite3.Binary(orbit), blockrow))
return orbit
def _hash_rows(self, rows):
if not rows:
return b'\x00'
data = b'\x01'
for row in rows:
data += self._hash_cols(row)
data += b'\xFF'
return self._hash(data)
def _hash_cols(self, cols):
data = '['
for col in cols:
if col:
data += '{}'.format(col)
data += '|'
data += ']'
return self._hash(data.encode('utf-8'))
def _hash(self, data):
return sha256(sha256(data).digest()).digest()
|
py | b40a828393afd0237fd98c59678c8838005c8374 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 PanXu, Inc. All Rights Reserved
#
"""
测试 max label index decoder
Authors: PanXu
Date: 2020/07/06 10:23:00
"""
import torch
from easytext.tests import ASSERT
from easytext.label_decoder import MaxLabelIndexDecoder
def test_max_label_index_decoder():
"""
测试 max label index
:return:
"""
decoder = MaxLabelIndexDecoder()
logits = torch.tensor([[0.1, 0.9], [0.3, 0.7], [0.8, 0.2]])
label_indices = decoder(logits=logits)
expect = [1, 1, 0]
ASSERT.assertListEqual(expect, label_indices.tolist())
|
py | b40a83437935b1bf76e6f0666d03e402ba8c9c97 | import torch
import torch.nn as nn
from config import train_config
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import AdamW
from utils.env import get_device
def model_function(train_loader: DataLoader, dev_loader: DataLoader,
model: nn.Module, tokenizer):
device, accelerator = get_device()
model.to(device)
optimizer = AdamW(model.parameters(), lr=train_config.learning_rate)
# Add learning rate decay
scheduler = StepLR(optimizer, step_size=1, gamma=0.8)
if train_config.fp16_training:
model, optimizer, train_loader = accelerator.prepare(
model, optimizer, train_loader)
print("Start Training ...")
model.train()
dev_acc = -1
for epoch in range(train_config.num_epoch):
step = 1
train_loss = train_acc = 0
for data in tqdm(train_loader):
# Load all data into GPU
data = [i.to(device) for i in data]
# Model inputs: input_ids, token_type_ids, attention_mask, start_positions, end_positions (Note: only "input_ids" is mandatory)
# Model outputs: start_logits, end_logits, loss (return when start_positions/end_positions are provided)
# output: 16*193
output = model(input_ids=data[0],
token_type_ids=data[1],
attention_mask=data[2],
start_positions=data[3],
end_positions=data[4])
# Choose the most probable start position / end position
start_index = torch.argmax(output.start_logits, dim=1)
end_index = torch.argmax(output.end_logits, dim=1)
# Prediction is correct only if both start_index and end_index are correct
train_acc += ((start_index == data[3]) &
(end_index == data[4])).float().mean()
train_loss += output.loss
optimizer.zero_grad()
if train_config.fp16_training:
accelerator.backward(output.loss)
else:
output.loss.backward()
optimizer.step()
step += 1
# Print training loss and accuracy over past logging step
if step % train_config.logging_step == 0:
print(
f"Epoch {epoch + 1} | Step {step} | loss = {train_loss.item() / train_config.logging_step:.3f}, acc = {train_acc / train_config.logging_step:.3f}"
)
train_loss = train_acc = 0
scheduler.step() # update learning rate every epoch instead of batch
if train_config.validation:
temp_dev_acc = validate(model, tokenizer, epoch, dev_loader, device)
if temp_dev_acc > dev_acc:
dev_acc = temp_dev_acc
save_model(model)
def validate(model: nn.Module, tokenizer, epoch: int, dev_loader: DataLoader,
device):
print("Evaluating Dev Set ...")
model.eval()
with torch.no_grad():
dev_acc = 0
for i, data in enumerate(tqdm(dev_loader)):
output = model(input_ids=data[0].squeeze(dim=0).to(device),
token_type_ids=data[1].squeeze(dim=0).to(device),
attention_mask=data[2].squeeze(dim=0).to(device))
# prediction is correct only if answer text exactly matches
dev_acc += translate(data, output, tokenizer) \
== dev_loader.dataset.questions[i]["answer_text"]
print(
f"Validation | Epoch {epoch + 1} | acc = {dev_acc / len(dev_loader):.3f}"
)
model.train()
return dev_acc
def translate(data, output, tokenizer):
##### TODO: Postprocessing #####
# There is a bug and room for improvement in postprocessing
# Hint: Open your prediction file to see what is wrong
answer = ''
max_prob = float('-inf')
num_of_windows = data[0].shape[1] # data[0/1/2]: 1*4*193
for k in range(num_of_windows):
# Obtain answer by choosing the most probable start position / end position
# start_logits: num_of_window*193
start_prob, start_index = torch.max(output.start_logits[k], dim=0)
end_prob, end_index = torch.max(output.end_logits[k][start_index:],
dim=0)
end_index += start_index
# Probability of answer is calculated as sum of start_prob and end_prob
prob = start_prob + end_prob
# Replace answer if calculated probability is larger than previous windows
if prob > max_prob:
max_prob = prob
# Convert tokens to chars (e.g. [1920, 7032] --> "大 金")
answer = tokenizer.decode(data[0][0][k][start_index:end_index + 1])
# Remove spaces in answer (e.g. "大 金" --> "大金")
return answer.replace(' ', '')
def save_model(model: nn.Module):
print("Saving Model ...")
model_save_dir = "checkpoints"
model.save_pretrained(model_save_dir)
|
py | b40a848e9ab0405f705a0e0553552770dad294ca | import mathExt as M
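# Sum of the decimal digits of 100! (Project Euler problem 20);
# mathExt is presumably a local helper module that provides factorial().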
print(sum([int(i) for i in str(M.factorial(100))])) |
py | b40a84cbfa1978a3fcebb4fe9be7d73a081fe932 | '''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
[email protected]
Created by Jonathan Denning, Jonathan Williamson, and Patrick Moore
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import random
import bgl
from mathutils.geometry import intersect_line_line_2d as intersect2d_segment_segment
from ..rftool import RFTool
from ..rfwidgets.rfwidget_default import RFWidget_Default_Factory
from ..rfmesh.rfmesh_wrapper import RFVert, RFEdge, RFFace
from ...addon_common.common.drawing import (
CC_DRAW,
CC_2D_POINTS,
CC_2D_LINES, CC_2D_LINE_LOOP,
CC_2D_TRIANGLES, CC_2D_TRIANGLE_FAN,
)
from ...addon_common.common.profiler import profiler
from ...addon_common.common.maths import Point, Point2D, Vec2D, Vec, Direction2D, intersection2d_line_line, closest2d_point_segment
from ...addon_common.common.fsm import FSM
from ...addon_common.common.globals import Globals
from ...addon_common.common.utils import iter_pairs
from ...addon_common.common.blender import tag_redraw_all
from ...addon_common.common.drawing import DrawCallbacks
from ...addon_common.common.boundvar import BoundBool, BoundInt, BoundFloat, BoundString
from ...config.options import options, themes
class PolyPen(RFTool):
name = 'PolyPen'
description = 'Create complex topology on vertex-by-vertex basis'
icon = 'polypen-icon.png'
help = 'polypen.md'
shortcut = 'polypen tool'
statusbar = '{{insert}} Insert'
ui_config = 'polypen_options.html'
RFWidget_Default = RFWidget_Default_Factory.create('PolyPen default')
RFWidget_Crosshair = RFWidget_Default_Factory.create('PolyPen crosshair', 'CROSSHAIR')
RFWidget_Move = RFWidget_Default_Factory.create('PolyPen move', 'HAND')
RFWidget_Knife = RFWidget_Default_Factory.create('PolyPen knife', 'KNIFE')
@RFTool.on_init
def init(self):
self.rfwidgets = {
'default': self.RFWidget_Default(self),
'insert': self.RFWidget_Crosshair(self),
'hover': self.RFWidget_Move(self),
'knife': self.RFWidget_Knife(self),
}
self.rfwidget = None
self.update_state_info()
self.first_time = True
self._var_merge_dist = BoundFloat( '''options['polypen merge dist'] ''')
self._var_automerge = BoundBool( '''options['polypen automerge'] ''')
self._var_insert_mode = BoundString('''options['polypen insert mode']''')
self.previs_timer = self.actions.start_timer(120.0, enabled=False)
def update_insert_mode(self):
mode = options['polypen insert mode']
self.ui_options_label.innerText = f'PolyPen: {mode}'
self.ui_insert_modes.dirty(cause='insert mode change', children=True)
@RFTool.on_ui_setup
def ui(self):
ui_options = self.document.body.getElementById('polypen-options')
self.ui_options_label = ui_options.getElementById('polypen-summary-label')
self.ui_insert_modes = ui_options.getElementById('polypen-insert-modes')
self.update_insert_mode()
@RFTool.on_reset
def reset(self):
self.previs_timer.stop()
@RFTool.on_reset
@RFTool.on_target_change
@RFTool.on_view_change
@FSM.onlyinstate('main')
# @profiler.function
def update_state_info(self):
if True: # with profiler.code('getting selected geometry'):
self.sel_verts = self.rfcontext.rftarget.get_selected_verts()
self.sel_edges = self.rfcontext.rftarget.get_selected_edges()
self.sel_faces = self.rfcontext.rftarget.get_selected_faces()
if True: # with profiler.code('getting visible geometry'):
self.vis_accel = self.rfcontext.get_vis_accel()
self.vis_verts = self.rfcontext.accel_vis_verts
self.vis_edges = self.rfcontext.accel_vis_edges
self.vis_faces = self.rfcontext.accel_vis_faces
if self.rfcontext.loading_done:
self.set_next_state(force=True)
@RFTool.on_mouse_stop
@FSM.onlyinstate({'main'})
def update_next_state_mouse(self):
self.set_next_state(force=True)
tag_redraw_all('PolyPen mouse stop')
# @profiler.function
def set_next_state(self, force=False):
'''
determines what the next state will be, based on selected mode, selected geometry, and hovered geometry
'''
if not self.actions.mouse and not force: return
if True: # with profiler.code('getting nearest geometry'):
self.nearest_vert,_ = self.rfcontext.accel_nearest2D_vert(max_dist=options['polypen merge dist'])
self.nearest_edge,_ = self.rfcontext.accel_nearest2D_edge(max_dist=options['polypen merge dist'])
self.nearest_face,_ = self.rfcontext.accel_nearest2D_face(max_dist=options['polypen merge dist'])
self.nearest_geom = self.nearest_vert or self.nearest_edge or self.nearest_face
# determine next state based on current selection, hovered geometry
num_verts = len(self.sel_verts)
num_edges = len(self.sel_edges)
num_faces = len(self.sel_faces)
if self.nearest_edge and self.nearest_edge.select: # overriding: if hovering over a selected edge, knife it!
self.next_state = 'knife selected edge'
elif options['polypen insert mode'] == 'Tri/Quad':
if num_verts == 1 and num_edges == 0 and num_faces == 0:
self.next_state = 'vert-edge'
elif num_edges and num_faces == 0:
quad_snap = False
if not self.nearest_vert and self.nearest_edge:
quad_snap = True
quad_snap &= len(self.nearest_edge.link_faces) <= 1
quad_snap &= not any(v in self.sel_verts for v in self.nearest_edge.verts)
quad_snap &= not any(e in f.edges for v in self.nearest_edge.verts for f in v.link_faces for e in self.sel_edges)
if quad_snap:
self.next_state = 'edge-quad-snap'
else:
self.next_state = 'edge-face'
elif num_verts == 3 and num_edges == 3 and num_faces == 1:
self.next_state = 'tri-quad'
else:
self.next_state = 'new vertex'
elif options['polypen insert mode'] == 'Quad-Only':
# a Desmos construction of how this works: https://www.desmos.com/geometry/bmmx206thi
if num_verts == 1 and num_edges == 0 and num_faces == 0:
self.next_state = 'vert-edge'
elif num_edges:
quad_snap = False
if not self.nearest_vert and self.nearest_edge:
quad_snap = True
quad_snap &= len(self.nearest_edge.link_faces) <= 1
quad_snap &= not any(v in self.sel_verts for v in self.nearest_edge.verts)
quad_snap &= not any(e in f.edges for v in self.nearest_edge.verts for f in v.link_faces for e in self.sel_edges)
if quad_snap:
self.next_state = 'edge-quad-snap'
else:
self.next_state = 'edge-quad'
else:
self.next_state = 'new vertex'
elif options['polypen insert mode'] == 'Tri-Only':
if num_verts == 1 and num_edges == 0 and num_faces == 0:
self.next_state = 'vert-edge'
elif num_edges and num_faces == 0:
quad = False
if not self.nearest_vert and self.nearest_edge:
quad = True
quad &= len(self.nearest_edge.link_faces) <= 1
quad &= not any(v in self.sel_verts for v in self.nearest_edge.verts)
quad &= not any(e in f.edges for v in self.nearest_edge.verts for f in v.link_faces for e in self.sel_edges)
if quad:
self.next_state = 'edge-quad-snap'
else:
self.next_state = 'edge-face'
elif num_verts == 3 and num_edges == 3 and num_faces == 1:
self.next_state = 'edge-face'
else:
self.next_state = 'new vertex'
elif options['polypen insert mode'] == 'Edge-Only':
if num_verts == 0:
self.next_state = 'new vertex'
else:
if self.nearest_edge:
self.next_state = 'vert-edge'
else:
self.next_state = 'vert-edge-vert'
else:
assert False, f'Unhandled PolyPen insert mode: {options["polypen insert mode"]}'
@FSM.on_state('main', 'enter')
def main_enter(self):
self.update_state_info()
@FSM.on_state('main')
def main(self):
if self.first_time:
self.set_next_state(force=True)
self.first_time = False
tag_redraw_all('PolyPen mousemove')
self.previs_timer.enable(self.actions.using_onlymods('insert'))
if self.actions.using_onlymods('insert'):
if self.next_state == 'knife selected edge':
self.rfwidget = self.rfwidgets['knife']
else:
self.rfwidget = self.rfwidgets['insert']
elif self.nearest_geom and self.nearest_geom.select:
self.rfwidget = self.rfwidgets['hover']
else:
self.rfwidget = self.rfwidgets['default']
for rfwidget in self.rfwidgets.values():
if self.rfwidget == rfwidget: continue
if rfwidget.inactive_passthrough():
self.rfwidget = rfwidget
return
if self.actions.pressed('pie menu alt0'):
def callback(option):
if not option: return
options['polypen insert mode'] = option
self.update_insert_mode()
self.rfcontext.show_pie_menu([
'Tri/Quad',
'Quad-Only',
'Tri-Only',
'Edge-Only',
], callback, highlighted=options['polypen insert mode'])
return
if self.actions.pressed('insert'):
return 'insert'
if self.nearest_geom and self.nearest_geom.select:
if self.actions.pressed('action'):
self.rfcontext.undo_push('grab')
self.prep_move(defer_recomputing=False)
return 'move after select'
if self.actions.pressed({'select path add'}):
return self.rfcontext.select_path(
{'edge', 'face'},
kwargs_select={'supparts': False},
)
if self.actions.pressed({'select paint', 'select paint add'}, unpress=False):
sel_only = self.actions.pressed('select paint')
self.actions.unpress()
return self.rfcontext.setup_smart_selection_painting(
{'vert','edge','face'},
selecting=not sel_only,
deselect_all=sel_only,
kwargs_select={'supparts': False},
kwargs_deselect={'subparts': False},
)
if self.actions.pressed({'select single', 'select single add'}, unpress=False):
sel_only = self.actions.pressed('select single')
self.actions.unpress()
bmv,_ = self.rfcontext.accel_nearest2D_vert(max_dist=options['select dist'])
bme,_ = self.rfcontext.accel_nearest2D_edge(max_dist=options['select dist'])
bmf,_ = self.rfcontext.accel_nearest2D_face(max_dist=options['select dist'])
sel = bmv or bme or bmf
if not sel_only and not sel: return
self.rfcontext.undo_push('select')
if sel_only: self.rfcontext.deselect_all()
if not sel: return
if sel.select: self.rfcontext.deselect(sel, subparts=False)
else: self.rfcontext.select(sel, supparts=False, only=sel_only)
return
if self.actions.pressed('grab'):
self.rfcontext.undo_push('move grabbed')
self.prep_move()
self.move_done_pressed = ['confirm', 'confirm drag']
self.move_done_released = None
self.move_cancelled = 'cancel'
return 'move'
def set_vis_bmverts(self):
self.vis_bmverts = [
(bmv, self.rfcontext.Point_to_Point2D(bmv.co))
for bmv in self.vis_verts
if bmv.is_valid and bmv not in self.sel_verts
]
@FSM.on_state('insert')
def insert(self):
self.rfcontext.undo_push('insert')
return self._insert()
def _get_edge_quad_verts(self):
'''
this function is used in quad-only mode to find positions of quad verts based on selected edge and mouse position
a Desmos construction of how this works: https://www.desmos.com/geometry/5w40xowuig
'''
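        # returns (xy0, xy1, xy2, xy3): xy0/xy1 are the selected edge's verts projected
        # to 2D, xy2/xy3 are the proposed far corners near the mouse (snapped to nearby
        # verts when possible); (None, None, None, None) means no valid quad was found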
e0,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges)
if not e0: return (None, None, None, None)
bmv0,bmv1 = e0.verts
xy0 = self.rfcontext.Point_to_Point2D(bmv0.co)
xy1 = self.rfcontext.Point_to_Point2D(bmv1.co)
d01 = (xy0 - xy1).length
mid01 = xy0 + (xy1 - xy0) / 2
mid23 = self.actions.mouse
mid0123 = mid01 + (mid23 - mid01) / 2
between = mid23 - mid01
if between.length < 0.0001: return (None, None, None, None)
perp = Direction2D((-between.y, between.x))
if perp.dot(xy1 - xy0) < 0: perp.reverse()
#pts = intersect_line_line(xy0, xy1, mid0123, mid0123 + perp)
#if not pts: return (None, None, None, None)
#intersection = pts[1]
intersection = intersection2d_line_line(xy0, xy1, mid0123, mid0123 + perp)
if not intersection: return (None, None, None, None)
intersection = Point2D(intersection)
toward = Direction2D(mid23 - intersection)
if toward.dot(perp) < 0: d01 = -d01
# push intersection out just a bit to make it more stable (prevent crossing) when |between| < d01
between_len = between.length * Direction2D(xy1 - xy0).dot(perp)
for tries in range(32):
v = toward * (d01 / 2)
xy2, xy3 = mid23 + v, mid23 - v
# try to prevent quad from crossing
v03 = xy3 - xy0
if v03.dot(between) < 0 or v03.length < between_len:
xy3 = xy0 + Direction2D(v03) * (between_len * (-1 if v03.dot(between) < 0 else 1))
v12 = xy2 - xy1
if v12.dot(between) < 0 or v12.length < between_len:
xy2 = xy1 + Direction2D(v12) * (between_len * (-1 if v12.dot(between) < 0 else 1))
if self.rfcontext.raycast_sources_Point2D(xy2)[0] and self.rfcontext.raycast_sources_Point2D(xy3)[0]: break
d01 /= 2
else:
return (None, None, None, None)
nearest_vert,_ = self.rfcontext.nearest2D_vert(point=xy2, verts=self.vis_verts, max_dist=options['polypen merge dist'])
if nearest_vert: xy2 = self.rfcontext.Point_to_Point2D(nearest_vert.co)
nearest_vert,_ = self.rfcontext.nearest2D_vert(point=xy3, verts=self.vis_verts, max_dist=options['polypen merge dist'])
if nearest_vert: xy3 = self.rfcontext.Point_to_Point2D(nearest_vert.co)
return (xy0, xy1, xy2, xy3)
@RFTool.dirty_when_done
def _insert(self):
self.last_delta = None
self.move_done_pressed = None
self.move_done_released = 'insert'
self.move_cancelled = 'cancel'
        if self.actions.shift and not self.actions.ctrl and self.next_state not in ['new vertex', 'vert-edge']:
self.next_state = 'vert-edge'
nearest_vert,_ = self.rfcontext.nearest2D_vert(verts=self.sel_verts, max_dist=options['polypen merge dist'])
self.rfcontext.select(nearest_vert)
sel_verts = self.sel_verts
sel_edges = self.sel_edges
sel_faces = self.sel_faces
if self.next_state == 'knife selected edge': # overriding: if hovering over a selected edge, knife it!
# self.nearest_edge and self.nearest_edge.select:
#print('knifing selected, hovered edge')
bmv = self.rfcontext.new2D_vert_mouse()
if not bmv:
self.rfcontext.undo_cancel()
return 'main'
bme0,bmv2 = self.nearest_edge.split()
bmv.merge(bmv2)
self.rfcontext.select(bmv)
self.mousedown = self.actions.mousedown
xy = self.rfcontext.Point_to_Point2D(bmv.co)
if not xy:
#print('Could not insert: ' + str(bmv.co))
self.rfcontext.undo_cancel()
return 'main'
self.bmverts = [(bmv, xy)] if bmv else []
self.set_vis_bmverts()
return 'move'
if self.next_state in {'vert-edge', 'vert-edge-vert'}:
bmv0 = next(iter(sel_verts))
if self.next_state == 'vert-edge':
nearest_vert,dist = self.rfcontext.nearest2D_vert(verts=self.vis_verts, max_dist=options['polypen merge dist'])
if nearest_vert:
bmv1 = nearest_vert
lbmf = bmv0.shared_faces(bmv1)
if len(lbmf) == 1 and not bmv0.share_edge(bmv1):
# split face
bmf = lbmf[0]
bmf.split(bmv0, bmv1)
self.rfcontext.select(bmv1)
return 'main'
nearest_edge,dist = self.rfcontext.nearest2D_edge(edges=self.vis_edges)
bmv1 = self.rfcontext.new2D_vert_mouse()
if not bmv1:
self.rfcontext.undo_cancel()
return 'main'
if dist is not None and dist < self.rfcontext.drawing.scale(15):
if bmv0 in nearest_edge.verts:
# selected vert already part of edge; split
bme0,bmv2 = nearest_edge.split()
bmv1.merge(bmv2)
self.rfcontext.select(bmv1)
else:
bme0,bmv2 = nearest_edge.split()
bmv1.merge(bmv2)
bmf = next(iter(bmv0.shared_faces(bmv1)), None)
if bmf:
if not bmv0.share_edge(bmv1):
bmf.split(bmv0, bmv1)
if not bmv0.share_face(bmv1):
bme = self.rfcontext.new_edge((bmv0, bmv1))
self.rfcontext.select(bme)
self.rfcontext.select(bmv1)
else:
bme = self.rfcontext.new_edge((bmv0, bmv1))
self.rfcontext.select(bme)
elif self.next_state == 'vert-edge-vert':
if self.nearest_vert:
bmv1 = self.nearest_vert
else:
bmv1 = self.rfcontext.new2D_vert_mouse()
if not bmv1:
self.rfcontext.undo_cancel()
return 'main'
bme = bmv0.shared_edge(bmv1) or self.rfcontext.new_edge((bmv0, bmv1))
self.rfcontext.select(bmv1)
else:
return 'main'
self.mousedown = self.actions.mousedown
xy = self.rfcontext.Point_to_Point2D(bmv1.co)
if not xy:
# dprint('Could not insert: ' + str(bmv1.co))
pass
self.rfcontext.undo_cancel()
return 'main'
self.bmverts = [(bmv1, xy)] if bmv1 else []
self.set_vis_bmverts()
return 'move'
if self.next_state == 'edge-face':
bme,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges)
if not bme: return
bmv0,bmv1 = bme.verts
if self.nearest_vert and not self.nearest_vert.select:
bmv2 = self.nearest_vert
bmf = self.rfcontext.new_face([bmv0, bmv1, bmv2])
self.rfcontext.clean_duplicate_bmedges(bmv2)
else:
bmv2 = self.rfcontext.new2D_vert_mouse()
if not bmv2:
self.rfcontext.undo_cancel()
return 'main'
bmf = self.rfcontext.new_face([bmv0, bmv1, bmv2])
self.rfcontext.select(bmf)
self.mousedown = self.actions.mousedown
xy = self.rfcontext.Point_to_Point2D(bmv2.co)
if not xy:
# dprint('Could not insert: ' + str(bmv2.co))
pass
self.rfcontext.undo_cancel()
return 'main'
self.bmverts = [(bmv2, xy)] if bmv2 else []
self.set_vis_bmverts()
return 'move'
if self.next_state == 'edge-quad':
xy0,xy1,xy2,xy3 = self._get_edge_quad_verts()
if xy0 is None or xy1 is None or xy2 is None or xy3 is None: return
# a Desmos construction of how this works: https://www.desmos.com/geometry/bmmx206thi
e0,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges)
if not e0: return
bmv0,bmv1 = e0.verts
bmv2,_ = self.rfcontext.nearest2D_vert(point=xy2, verts=self.vis_verts, max_dist=options['polypen merge dist'])
if not bmv2: bmv2 = self.rfcontext.new2D_vert_point(xy2)
bmv3,_ = self.rfcontext.nearest2D_vert(point=xy3, verts=self.vis_verts, max_dist=options['polypen merge dist'])
if not bmv3: bmv3 = self.rfcontext.new2D_vert_point(xy3)
if not bmv2 or not bmv3:
self.rfcontext.undo_cancel()
return 'main'
e1 = bmv2.shared_edge(bmv3)
if not e1: e1 = self.rfcontext.new_edge([bmv2, bmv3])
bmf = self.rfcontext.new_face([bmv0, bmv1, bmv2, bmv3])
bmes = [bmv1.shared_edge(bmv2), bmv0.shared_edge(bmv3), bmv2.shared_edge(bmv3)]
self.rfcontext.select(bmes, subparts=False)
self.mousedown = self.actions.mousedown
self.bmverts = []
if bmv2: self.bmverts.append((bmv2, self.rfcontext.Point_to_Point2D(bmv2.co)))
if bmv3: self.bmverts.append((bmv3, self.rfcontext.Point_to_Point2D(bmv3.co)))
self.set_vis_bmverts()
return 'move'
if self.next_state == 'edge-quad-snap':
e0,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges)
e1 = self.nearest_edge
if not e0 or not e1: return
bmv0,bmv1 = e0.verts
bmv2,bmv3 = e1.verts
p0,p1 = self.rfcontext.Point_to_Point2D(bmv0.co),self.rfcontext.Point_to_Point2D(bmv1.co)
p2,p3 = self.rfcontext.Point_to_Point2D(bmv2.co),self.rfcontext.Point_to_Point2D(bmv3.co)
if intersect2d_segment_segment(p1, p2, p3, p0): bmv2,bmv3 = bmv3,bmv2
# if e0.vector2D(self.rfcontext.Point_to_Point2D).dot(e1.vector2D(self.rfcontext.Point_to_Point2D)) > 0:
# bmv2,bmv3 = bmv3,bmv2
bmf = self.rfcontext.new_face([bmv0, bmv1, bmv2, bmv3])
# select all non-manifold edges that share vertex with e1
bmes = [e for e in bmv2.link_edges + bmv3.link_edges if not e.is_manifold and not e.share_face(e1)]
if not bmes:
bmes = [bmv1.shared_edge(bmv2), bmv0.shared_edge(bmv3)]
self.rfcontext.select(bmes, subparts=False)
return 'main'
if self.next_state == 'tri-quad':
hit_pos = self.actions.hit_pos
if not hit_pos:
self.rfcontext.undo_cancel()
return 'main'
if not self.sel_edges:
return 'main'
bme0,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges)
if not bme0: return
bmv0,bmv2 = bme0.verts
bme1,bmv1 = bme0.split()
bme0.select = True
bme1.select = True
self.rfcontext.select(bmv1.link_edges)
if self.nearest_vert and not self.nearest_vert.select:
self.nearest_vert.merge(bmv1)
bmv1 = self.nearest_vert
self.rfcontext.clean_duplicate_bmedges(bmv1)
for bme in bmv1.link_edges: bme.select &= len(bme.link_faces)==1
bme01,bme12 = bmv0.shared_edge(bmv1),bmv1.shared_edge(bmv2)
if len(bme01.link_faces) == 1: bme01.select = True
if len(bme12.link_faces) == 1: bme12.select = True
else:
bmv1.co = hit_pos
self.mousedown = self.actions.mousedown
self.rfcontext.select(bmv1, only=False)
xy = self.rfcontext.Point_to_Point2D(bmv1.co)
if not xy:
# dprint('Could not insert: ' + str(bmv3.co))
pass
self.rfcontext.undo_cancel()
return 'main'
self.bmverts = [(bmv1, xy)] if bmv1 else []
self.set_vis_bmverts()
return 'move'
nearest_edge,d = self.rfcontext.nearest2D_edge(edges=self.vis_edges)
bmv = self.rfcontext.new2D_vert_mouse()
if not bmv:
self.rfcontext.undo_cancel()
return 'main'
if d is not None and d < self.rfcontext.drawing.scale(15):
bme0,bmv2 = nearest_edge.split()
bmv.merge(bmv2)
self.rfcontext.select(bmv)
self.mousedown = self.actions.mousedown
xy = self.rfcontext.Point_to_Point2D(bmv.co)
if not xy:
# dprint('Could not insert: ' + str(bmv.co))
pass
self.rfcontext.undo_cancel()
return 'main'
self.bmverts = [(bmv, xy)] if bmv else []
self.set_vis_bmverts()
return 'move'
def mergeSnapped(self):
""" Merging colocated visible verts """
if not options['polypen automerge']: return
# TODO: remove colocated faces
if self.mousedown is None: return
delta = Vec2D(self.actions.mouse - self.mousedown)
set2D_vert = self.rfcontext.set2D_vert
update_verts = []
merge_dist = self.rfcontext.drawing.scale(options['polypen merge dist'])
for bmv,xy in self.bmverts:
if not xy: continue
xy_updated = xy + delta
for bmv1,xy1 in self.vis_bmverts:
if not xy1: continue
if bmv1 == bmv: continue
if not bmv1.is_valid: continue
                d = (xy_updated - xy1).length
                if d > merge_dist:
                    continue
bmv1.merge_robust(bmv)
self.rfcontext.select(bmv1)
update_verts += [bmv1]
break
if update_verts:
self.rfcontext.update_verts_faces(update_verts)
self.set_next_state()
def prep_move(self, bmverts=None, defer_recomputing=True):
if not bmverts: bmverts = self.sel_verts
self.bmverts = [(bmv, self.rfcontext.Point_to_Point2D(bmv.co)) for bmv in bmverts if bmv and bmv.is_valid]
self.set_vis_bmverts()
self.mousedown = self.actions.mouse
self.last_delta = None
self.defer_recomputing = defer_recomputing
@FSM.on_state('move after select')
# @profiler.function
def modal_move_after_select(self):
if self.actions.released('action'):
return 'main'
if (self.actions.mouse - self.mousedown).length > 7:
self.last_delta = None
self.move_done_pressed = None
self.move_done_released = 'action'
self.move_cancelled = 'cancel'
self.rfcontext.undo_push('move after select')
return 'move'
@FSM.on_state('move', 'enter')
def move_enter(self):
self.move_opts = {
'vis_accel': self.rfcontext.get_custom_vis_accel(selection_only=False, include_edges=False, include_faces=False),
}
self.rfcontext.split_target_visualization_selected()
self.previs_timer.start()
self.rfcontext.set_accel_defer(True)
@FSM.on_state('move')
# @profiler.function
def modal_move(self):
if self.move_done_pressed and self.actions.pressed(self.move_done_pressed):
self.defer_recomputing = False
self.mergeSnapped()
return 'main'
if self.move_done_released and self.actions.released(self.move_done_released, ignoremods=True):
self.defer_recomputing = False
self.mergeSnapped()
return 'main'
if self.move_cancelled and self.actions.pressed('cancel'):
self.defer_recomputing = False
self.rfcontext.undo_cancel()
return 'main'
if not self.actions.mousemove_stop: return
# # only update verts on timer events and when mouse has moved
# if not self.actions.timer: return
# if self.actions.mouse_prev == self.actions.mouse: return
delta = Vec2D(self.actions.mouse - self.mousedown)
if delta == self.last_delta: return
self.last_delta = delta
set2D_vert = self.rfcontext.set2D_vert
for bmv,xy in self.bmverts:
if not xy: continue
xy_updated = xy + delta
# check if xy_updated is "close" to any visible verts (in image plane)
# if so, snap xy_updated to vert position (in image plane)
if options['polypen automerge']:
bmv1,d = self.rfcontext.accel_nearest2D_vert(point=xy_updated, vis_accel=self.move_opts['vis_accel'], max_dist=options['polypen merge dist'])
if bmv1 is None:
set2D_vert(bmv, xy_updated)
continue
xy1 = self.rfcontext.Point_to_Point2D(bmv1.co)
if not xy1:
set2D_vert(bmv, xy_updated)
continue
set2D_vert(bmv, xy1)
else:
set2D_vert(bmv, xy_updated)
self.rfcontext.update_verts_faces(v for v,_ in self.bmverts)
self.rfcontext.dirty()
@FSM.on_state('move', 'exit')
def move_exit(self):
self.previs_timer.stop()
self.rfcontext.set_accel_defer(False)
self.rfcontext.clear_split_target_visualization()
def draw_lines(self, coords, poly_alpha=0.2):
line_color = themes['new']
poly_color = [line_color[0], line_color[1], line_color[2], line_color[3] * poly_alpha]
l = len(coords)
coords = [self.rfcontext.Point_to_Point2D(co) for co in coords]
if not all(coords): return
if l == 1:
with Globals.drawing.draw(CC_2D_POINTS) as draw:
draw.color(line_color)
for c in coords:
draw.vertex(c)
elif l == 2:
with Globals.drawing.draw(CC_2D_LINES) as draw:
draw.color(line_color)
draw.vertex(coords[0])
draw.vertex(coords[1])
else:
with Globals.drawing.draw(CC_2D_LINE_LOOP) as draw:
draw.color(line_color)
for co in coords: draw.vertex(co)
with Globals.drawing.draw(CC_2D_TRIANGLE_FAN) as draw:
draw.color(poly_color)
draw.vertex(coords[0])
for co1,co2 in iter_pairs(coords[1:], False):
draw.vertex(co1)
draw.vertex(co2)
@DrawCallbacks.on_draw('post2d')
@FSM.onlyinstate('main')
def draw_postpixel(self):
# TODO: put all logic into set_next_state(), such as vertex snapping, edge splitting, etc.
#if self.rfcontext.nav or self.mode != 'main': return
if not self.actions.using_onlymods('insert'): return # 'insert alt1'??
hit_pos = self.actions.hit_pos
if not hit_pos: return
self.set_next_state()
bgl.glEnable(bgl.GL_BLEND)
CC_DRAW.stipple(pattern=[4,4])
CC_DRAW.point_size(8)
CC_DRAW.line_width(2)
if self.next_state == 'knife selected edge':
bmv1,bmv2 = self.nearest_edge.verts
faces = self.nearest_edge.link_faces
if faces:
for f in faces:
lco = []
for v0,v1 in iter_pairs(f.verts, True):
lco.append(v0.co)
if (v0 == bmv1 and v1 == bmv2) or (v0 == bmv2 and v1 == bmv1):
lco.append(hit_pos)
self.draw_lines(lco)
else:
self.draw_lines([bmv1.co, hit_pos])
self.draw_lines([bmv2.co, hit_pos])
elif self.next_state == 'new vertex':
p0 = hit_pos
e1,d = self.rfcontext.nearest2D_edge(edges=self.vis_edges)
if e1:
bmv1,bmv2 = e1.verts
if d is not None and d < self.rfcontext.drawing.scale(15):
f = next(iter(e1.link_faces), None)
if f:
lco = []
for v0,v1 in iter_pairs(f.verts, True):
lco.append(v0.co)
if (v0 == bmv1 and v1 == bmv2) or (v0 == bmv2 and v1 == bmv1):
lco.append(p0)
self.draw_lines(lco)
else:
self.draw_lines([bmv1.co, hit_pos])
self.draw_lines([bmv2.co, hit_pos])
else:
self.draw_lines([hit_pos])
else:
self.draw_lines([hit_pos])
elif self.next_state in {'vert-edge', 'vert-edge-vert'}:
sel_verts = self.sel_verts
bmv0 = next(iter(sel_verts))
if self.nearest_vert:
p0 = self.nearest_vert.co
elif self.next_state == 'vert-edge':
p0 = hit_pos
e1,d = self.rfcontext.nearest2D_edge(edges=self.vis_edges)
if e1:
bmv1,bmv2 = e1.verts
if d is not None and d < self.rfcontext.drawing.scale(15):
f = next(iter(e1.link_faces), None)
if f:
lco = []
for v0,v1 in iter_pairs(f.verts, True):
lco.append(v0.co)
if (v0 == bmv1 and v1 == bmv2) or (v0 == bmv2 and v1 == bmv1):
lco.append(p0)
self.draw_lines(lco)
else:
self.draw_lines([bmv1.co, p0])
self.draw_lines([bmv2.co, p0])
elif self.next_state == 'vert-edge-vert':
p0 = hit_pos
else:
return
self.draw_lines([bmv0.co, p0])
elif self.actions.shift and not self.actions.ctrl:
if self.next_state in ['edge-face', 'edge-quad', 'edge-quad-snap', 'tri-quad']:
nearest_vert,_ = self.rfcontext.nearest2D_vert(verts=self.sel_verts, max_dist=options['polypen merge dist'])
if nearest_vert:
self.draw_lines([nearest_vert.co, hit_pos])
elif not self.actions.shift and self.actions.ctrl:
if self.next_state == 'edge-face':
e0,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges) #next(iter(self.sel_edges))
if not e0: return
e1,d = self.rfcontext.nearest2D_edge(edges=self.vis_edges)
if e1 and d < self.rfcontext.drawing.scale(15) and e0 == e1:
bmv1,bmv2 = e1.verts
p0 = hit_pos
f = next(iter(e1.link_faces), None)
if f:
lco = []
for v0,v1 in iter_pairs(f.verts, True):
lco.append(v0.co)
if (v0 == bmv1 and v1 == bmv2) or (v0 == bmv2 and v1 == bmv1):
lco.append(p0)
self.draw_lines(lco)
else:
self.draw_lines([bmv1.co, hit_pos])
self.draw_lines([bmv2.co, hit_pos])
else:
# self.draw_lines([hit_pos])
bmv1,bmv2 = e0.verts
if self.nearest_vert and not self.nearest_vert.select:
p0 = self.nearest_vert.co
else:
p0 = hit_pos
self.draw_lines([p0, bmv1.co, bmv2.co])
elif self.next_state == 'edge-quad':
# a Desmos construction of how this works: https://www.desmos.com/geometry/bmmx206thi
xy0, xy1, xy2, xy3 = self._get_edge_quad_verts()
if xy0 is None: return
co0 = self.rfcontext.raycast_sources_Point2D(xy0)[0]
co1 = self.rfcontext.raycast_sources_Point2D(xy1)[0]
co2 = self.rfcontext.raycast_sources_Point2D(xy2)[0]
co3 = self.rfcontext.raycast_sources_Point2D(xy3)[0]
self.draw_lines([co1, co2, co3, co0])
elif self.next_state == 'edge-quad-snap':
e0,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges)
e1 = self.nearest_edge
if not e0 or not e1: return
bmv0,bmv1 = e0.verts
bmv2,bmv3 = e1.verts
p0,p1 = self.rfcontext.Point_to_Point2D(bmv0.co),self.rfcontext.Point_to_Point2D(bmv1.co)
p2,p3 = self.rfcontext.Point_to_Point2D(bmv2.co),self.rfcontext.Point_to_Point2D(bmv3.co)
if intersect2d_segment_segment(p1, p2, p3, p0): bmv2,bmv3 = bmv3,bmv2
# if e0.vector2D(self.rfcontext.Point_to_Point2D).dot(e1.vector2D(self.rfcontext.Point_to_Point2D)) > 0:
# bmv2,bmv3 = bmv3,bmv2
self.draw_lines([bmv0.co, bmv1.co, bmv2.co, bmv3.co])
elif self.next_state == 'tri-quad':
if self.nearest_vert and not self.nearest_vert.select:
p0 = self.nearest_vert.co
else:
p0 = hit_pos
e1,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges)
if not e1: return
bmv1,bmv2 = e1.verts
f = next(iter(e1.link_faces), None)
if not f: return
lco = []
for v0,v1 in iter_pairs(f.verts, True):
lco.append(v0.co)
if (v0 == bmv1 and v1 == bmv2) or (v0 == bmv2 and v1 == bmv1):
lco.append(p0)
self.draw_lines(lco)
#self.draw_lines([p0, bmv1.co, bmv2.co])
# elif self.next_state == 'edges-face':
# if self.nearest_vert and not self.nearest_vert.select:
# p0 = self.nearest_vert.co
# else:
# p0 = hit_pos
# e1,_ = self.rfcontext.nearest2D_edge(edges=self.sel_edges)
# bmv1,bmv2 = e1.verts
# self.draw_lines([p0, bmv1.co, bmv2.co])
# self.drawing.disable_stipple() |
py | b40a853f1f900e5e8876f205159b15b0e33269ea | from rest_framework import serializers
from .coins.models import Coin
from .faucets.models import Faucet, Location, CoinSpawn
class CoinSerializer(serializers.ModelSerializer):
class Meta:
model = Coin
fields = ('id', 'name', 'logo',)
class LocationSerializer(serializers.ModelSerializer):
class Meta:
model = Location
fields = ('id', 'name', 'lat', 'lng')
class FaucetSerializer(serializers.ModelSerializer):
location = LocationSerializer()
coin = CoinSerializer()
class Meta:
        model = Faucet
fields = ('id', 'coin', 'location')
class CoinSpawnSerializer(serializers.ModelSerializer):
faucet = FaucetSerializer()
class Meta:
model = CoinSpawn
fields = ('id', 'faucet', 'type', 'amount', 'captured_by',)
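
# Illustrative serialized shape only (values below are made up); CoinSpawnSerializer(spawn).data
# nests the related faucet, coin and location:
# {
#     "id": 1,
#     "faucet": {
#         "id": 3,
#         "coin": {"id": 2, "name": "ExampleCoin", "logo": "..."},
#         "location": {"id": 5, "name": "Example Park", "lat": 0.0, "lng": 0.0}
#     },
#     "type": "...",
#     "amount": 10,
#     "captured_by": None
# }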
|
py | b40a8720d35667d9b59b2c6531848e39484e8ba9 | #!/usr/bin/env python3
import email.message
import mimetypes
import os.path
import smtplib
def generate_email(sender, recipient, subject, body, attachment_path):
message = email.message.EmailMessage()
message['From'] = sender
message['To'] = recipient
message['Subject'] = subject
    message.set_content(body)
if attachment_path != '':
attachment_filename = os.path.basename(attachment_path)
mime_type, _ = mimetypes.guess_type(attachment_path)
mime_type, mime_subtype = mime_type.split('/', 1)
with open(attachment_path, 'rb') as ap:
message.add_attachment(ap.read(),
maintype=mime_type,
subtype=mime_subtype,
filename=attachment_filename)
return message
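
# Example usage (illustrative only; the addresses and the attachment path are placeholders):
#
#   message = generate_email('[email protected]', '[email protected]',
#                            'Sales summary', 'Attached is the latest report.',
#                            '/tmp/report.pdf')
#   send_email(message)
#
# send_email() below assumes an SMTP server is listening on localhost.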
def send_email(message):
mail_server = smtplib.SMTP('localhost')
mail_server.send_message(message)
mail_server.quit() |
py | b40a87cea7b94443f4c8733850bdd2e61ad91da2 | """Views for the moderation app"""
from collections import defaultdict
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import ListView, UpdateView, View
from pure_pagination.mixins import PaginationMixin
from open_connect.groups.models import Group
from open_connect.moderation.forms import ModNotificationUpdateForm
from open_connect.moderation.models import Flag
from open_connect.moderation.utils import moderate_messages
from open_connect.connectmessages.models import Message, MESSAGE_STATUSES
from open_connect.connect_core.utils.mixins import (
SortableListMixin,
DateTimeRangeListMixin,
PaginateByMixin
)
from open_connect.connect_core.utils.views import CommonViewMixin
POSSIBLE_ACTIONS = [statuspair[0] for statuspair in MESSAGE_STATUSES]
class ModeratorOnlyMixin(object):
"""Mixin that restricts the view to those who can moderate messages"""
def dispatch(self, request, *args, **kwargs):
"""Override for the view's dispatch method"""
if not request.user.can_moderate:
raise Http404
return super(ModeratorOnlyMixin, self).dispatch(
request, *args, **kwargs)
class ModeratorView(
PaginationMixin, ModeratorOnlyMixin, CommonViewMixin, ListView):
"""View that handles viewing messages to be moderated"""
model = Message
template_name = "moderation/messagelist.html"
paginate_by = 20
nav_active_item = 'Admin'
dd_active_item = 'Message Moderation'
def get_context_data(self, **kwargs):
"""Add additional context to the moderation page"""
context = super(ModeratorView, self).get_context_data(**kwargs)
group_id = self.kwargs.get('group')
if group_id:
context['group'] = get_object_or_404(Group, pk=group_id)
return context
def get_queryset(self):
"""Get the queryset for the page"""
queryset = self.request.user.messages_to_moderate
# Filter by group if group is in kwargs
group = self.kwargs.get('group')
if group:
queryset = queryset.filter(thread__group_id=group)
queryset = queryset.select_related(
'thread', 'thread__group', 'thread__group__group', 'sender')
return queryset
class SubmitView(ModeratorOnlyMixin, View):
"""View that processes"""
http_method_names = [u'post']
# pylint: disable=unused-argument, no-self-use
def http_method_not_allowed(self, request, *args, **kwargs):
"""Redirect users who are not performing a POST request"""
# Instead of just returning the standard "method not allowed" HTTP
# status code, we can forward to the moderation admin
return redirect(reverse('mod_admin'))
# pylint: disable=no-self-use
def get_success_url(self, request):
"""The url a user should be returned to if post was a success."""
return request.POST.get('next', reverse('mod_admin'))
# pylint: disable=unused-argument
def post(self, request, **kwargs):
"""Process moderation changes."""
change_count = 0
actions = defaultdict(list)
for message_string, action in request.POST.iteritems():
if not message_string.startswith('message-'):
continue
message_id = message_string.split('-')[1]
if message_id.isdigit() and action in POSSIBLE_ACTIONS:
actions[action].append(message_id)
if actions:
change_count += moderate_messages(actions, request.user)
if not change_count:
messages.warning(request, 'No Messages Updated')
else:
pluralized_message = 'Messages' if change_count > 1 else 'Message'
messages.success(
request, "Updated %s %s" % (change_count, pluralized_message))
cache.delete('%s_messages_to_mod' % self.request.user.pk)
return redirect(self.get_success_url(request))
class ModerationFrequencyUpdateView(ModeratorOnlyMixin, UpdateView):
"""View for updating individual subscrptions for individual groups"""
http_method_names = [u'post']
model = get_user_model()
form_class = ModNotificationUpdateForm
success_url = reverse_lazy('user_profile')
def get_object(self, queryset=None):
"""Get the User object to be modified"""
return self.request.user
def form_valid(self, form):
"""Method to handle a valid form"""
form.save()
messages.success(
self.request, "Your moderation frequency has been set")
return HttpResponseRedirect(self.get_success_url())
class FlagLogView(PaginationMixin, PaginateByMixin, DateTimeRangeListMixin,
SortableListMixin, CommonViewMixin, ModeratorOnlyMixin,
ListView):
"""List moderation actions."""
model = Flag
valid_order_by = [
'moderation_action__messagemoderationaction__message__text',
'moderation_action__messagemoderationaction__message__status',
'flagged_by__first_name',
'created_at',
'moderation_action__moderator__first_name',
'moderation_action__modified_at',
'moderation_action__messagemoderationaction__newstatus'
]
default_order_by = '-created_at'
date_range_field = 'created_at'
nav_active_item = 'Admin'
dd_active_item = 'Flag Log'
paginate_by = 25
context_object_name = 'flags'
def get_queryset(self):
"""Get the queryset for the Flag log"""
queryset = super(FlagLogView, self).get_queryset()
if not self.request.user.global_moderator:
queryset = queryset.filter(
message__thread__group__in=self.request.user.groups_moderating
).distinct()
return queryset
|
py | b40a88115b8572159b7954aa14f6187b642cd2d6 | _base_ = [
'../_base_/models/lcgn_config.py',
'../_base_/datasets/gqa_dataset.py',
'../_base_/schedules/schedule_vqa.py',
'../_base_/default_runtime.py'
] # yapf:disable
|
py | b40a88887cb6c19b73268814c95d97aa7300cddf | from django.db import models
# Create your models here.
class tweet(models.Model):
id = models.IntegerField(primary_key=True)
text = models.TextField()
date = models.CharField(max_length=100)
flag = models.CharField(max_length=100)
user = models.CharField(max_length=100)
target = models.IntegerField()
def __str__(self):
return str(self.id)
|
py | b40a88f8fca0363a429186eda9fbaed967c7f2b1 | """
MIT License
Copyright (c) 2020 Erdem Yılmaz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__: str = 'R3nzTheCodeGOD'
__version__: str = 'v2.0.0'
from keyboard import is_pressed
from os import system
from time import perf_counter, sleep
from ctypes import windll
from PIL import ImageGrab, Image
from winsound import Beep
from mss import mss
from colorama import Fore, Style, init
S_HEIGHT, S_WIDTH = (ImageGrab.grab().size)
GRABZONE: int = 5
TRIGGER_KEY: str = 'shift'
SWITCH_KEY: str = 'ctrl + tab'
GRABZONE_KEY_UP: str = 'ctrl + up'
GRABZONE_KEY_DOWN: str = 'ctrl + down'
mods: tuple = ('0.3s Delay', '0.2s Delay', '0.1s Delay', 'No Delay Full-Auto')
class FoundEnemy(Exception):
pass
class TriggerBot:
def __init__(self) -> None:
self._mode: int = 2
self._last_reac: int = 0
def switch(self):
if self._mode != 3: self._mode += 1
else: self._mode = 0
if self._mode == 0: Beep(200, 100)
elif self._mode == 1: Beep(200, 100), Beep(200, 100)
elif self._mode == 2: Beep(200, 100), Beep(200, 100), Beep(200, 100)
elif self._mode == 3: Beep(200, 100), Beep(200, 100), Beep(200, 100), Beep(200, 100)
def color_check(self, red: int, green: int, blue: int) -> bool:
if green >= 190:
return False
if green >= 140:
return abs(red - blue) <= 8 and red - green >= 50 and blue - green >= 50 and red >= 105 and blue >= 105
return abs(red - blue) <= 13 and red - green >= 60 and blue - green >= 60 and red >= 110 and blue >= 100
def grab(self) -> Image:
with mss() as sct:
bbox: tuple = (int(S_HEIGHT / 2 - GRABZONE), int(S_WIDTH / 2 - GRABZONE), int(S_HEIGHT / 2 + GRABZONE), int(S_WIDTH / 2 + GRABZONE))
sct_img = sct.grab(bbox)
return Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
def scan(self) -> None:
start_time: float = perf_counter()
pmap: Image = self.grab()
try:
for x in range(0, GRABZONE*2):
for y in range(0, GRABZONE*2):
r, g, b = pmap.getpixel((x,y))
if self.color_check(r, g, b): raise FoundEnemy
except FoundEnemy:
self._last_reac = int((perf_counter() - start_time)*1000)
windll.user32.mouse_event(2, 0, 0, 0, 0)
windll.user32.mouse_event(4, 0, 0, 0, 0)
if self._mode == 0: sleep(0.3)
elif self._mode == 1: sleep(0.2)
elif self._mode == 2: sleep(0.1)
elif self._mode == 3: pass
def print_banner(bot: TriggerBot) -> None:
system('cls')
print(Style.BRIGHT + Fore.CYAN + f'{__author__} Valorant External Cheat {__version__}' + Style.RESET_ALL)
print('====== Controls ======')
print('Trigger Key :', Fore.YELLOW + TRIGGER_KEY + Style.RESET_ALL)
print('Mode Chage Key :', Fore.YELLOW + SWITCH_KEY + Style.RESET_ALL)
print('Grab Zone Change Key :', Fore.YELLOW + GRABZONE_KEY_UP + '/' + GRABZONE_KEY_DOWN + Style.RESET_ALL)
print('===== Information ====')
print('Mode :', Fore.CYAN + mods[bot._mode] + Style.RESET_ALL)
print('Grab Zone :', Fore.CYAN + str(GRABZONE) + 'x' + str(GRABZONE) + Style.RESET_ALL)
print('Trigger Status :', Fore.GREEN + f'Hold down the "{TRIGGER_KEY}" key' + Style.RESET_ALL)
print('Last React Time :', Fore.CYAN + str(bot._last_reac) + Style.RESET_ALL + ' ms (' + str((bot._last_reac)/(GRABZONE*GRABZONE)) + 'ms/pix)')
if __name__ == "__main__":
print('e340d4e42d032023127fcd4a42ea34349fc0b00b982047ad614d405fc2cd1168')
init()
system('@echo off')
system('cls')
bot = TriggerBot()
print_banner(bot)
while True:
if is_pressed(SWITCH_KEY):
bot.switch()
print_banner(bot)
if is_pressed(GRABZONE_KEY_UP):
GRABZONE += 1
print_banner(bot)
Beep(400, 100)
if is_pressed(GRABZONE_KEY_DOWN):
if GRABZONE != 1: GRABZONE -= 1
print_banner(bot)
Beep(300, 100)
if is_pressed(TRIGGER_KEY):
bot.scan()
print_banner(bot)
sleep(0.001) # loop with keyboard.is_pressed causing input lag
"""Keyboard kütüphanesi çok fazla threding işlemi ve for döngüsü yapıyor.
Hızlı bilgisayarlarda fazla girilen while döngüsü problem oluşturuyor.""" |
py | b40a892715ae12ba20d4088a8d0aa8baa10fe3ea | """
============
Ftface Props
============
This is a demo script to show you how to use all the properties of an
FT2Font object. These describe global font properties. For
individual character metrics, use the Glyph object, as returned by
load_char
"""
import matplotlib
import matplotlib.ft2font as ft
#fname = '/usr/local/share/matplotlib/VeraIt.ttf'
fname = matplotlib.get_data_path() + '/fonts/ttf/DejaVuSans-Oblique.ttf'
#fname = '/usr/local/share/matplotlib/cmr10.ttf'
font = ft.FT2Font(fname)
print('Num faces :', font.num_faces) # number of faces in file
print('Num glyphs :', font.num_glyphs) # number of glyphs in the face
print('Family name :', font.family_name) # face family name
print('Style name :', font.style_name) # face style name
print('PS name :', font.postscript_name) # the postscript name
print('Num fixed :', font.num_fixed_sizes) # number of embedded bitmap in face
# the following are only available if face.scalable
if font.scalable:
# the face global bounding box (xmin, ymin, xmax, ymax)
print('Bbox :', font.bbox)
# number of font units covered by the EM
print('EM :', font.units_per_EM)
# the ascender in 26.6 units
print('Ascender :', font.ascender)
# the descender in 26.6 units
print('Descender :', font.descender)
# the height in 26.6 units
print('Height :', font.height)
# maximum horizontal cursor advance
print('Max adv width :', font.max_advance_width)
# same for vertical layout
print('Max adv height :', font.max_advance_height)
# vertical position of the underline bar
print('Underline pos :', font.underline_position)
# vertical thickness of the underline
print('Underline thickness :', font.underline_thickness)
for style in ('Italic',
'Bold',
'Scalable',
'Fixed sizes',
'Fixed width',
'SFNT',
'Horizontal',
'Vertical',
'Kerning',
'Fast glyphs',
'Multiple masters',
'Glyph names',
'External stream'):
bitpos = getattr(ft, style.replace(' ', '_').upper()) - 1
print('%-17s:' % style, bool(font.style_flags & (1 << bitpos)))
print(dir(font))
print(font.get_kerning)
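
# Per-character metrics come from the Glyph object returned by load_char(),
# as the docstring above notes. A minimal sketch; the attribute names below
# (bbox, horiAdvance) are assumed from the FT2Font Glyph API:
glyph = font.load_char(ord('g'))
print('Glyph bbox :', glyph.bbox) # glyph bounding box (xmin, ymin, xmax, ymax)
print('Glyph h-advance :', glyph.horiAdvance) # horizontal advance of the glyph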
|
py | b40a8936c7781166808df7628e2d2684ebdd330d | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
# TODO(sguada) Expose tf.with_dependencies
from tensorflow.python.ops import control_flow_ops
class AvgPool2DTest(tf.test.TestCase):
def testCreateAvgPool(self):
height, width = 3, 3
images = np.random.uniform(size=(5, height, width, 3))
output = tf.contrib.layers.avg_pool2d(images, [3, 3])
self.assertEquals(output.op.name, 'AvgPool2D/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCollectOutputs(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.avg_pool2d(images, [3, 3],
outputs_collections='outputs')
output_collected = tf.get_collection('outputs')[0]
self.assertEquals(output_collected.alias, 'AvgPool2D')
self.assertEquals(output_collected, output)
def testCreateSquareAvgPool(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.avg_pool2d(images, 3)
self.assertEquals(output.op.name, 'AvgPool2D/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateAvgPoolWithScope(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.avg_pool2d(images, [3, 3], scope='pool1')
self.assertEquals(output.op.name, 'pool1/AvgPool')
def testCreateAvgPoolWithSamePadding(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.avg_pool2d(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])
def testCreateAvgPoolStrideWithSamePadding(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.avg_pool2d(images, [3, 3], stride=1,
padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalAvgPool(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.avg_pool2d(images, images.get_shape()[1:3],
stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class BiasAddTest(tf.test.TestCase):
def testCreate(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = tf.contrib.layers.bias_add(images)
self.assertEquals(output.op.name, 'BiasAdd/BiasAdd')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateWithActivation(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.bias_add(images, activation_fn=tf.nn.relu)
self.assertEquals(output.op.name, 'BiasAdd/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateDimensions(self):
dims = (2, 3, 4)
shape = [5, 2, 3, 4]
with self.test_session():
for d in dims:
input_shape = shape[:d]
inputs = tf.random_uniform(input_shape, seed=1)
output = tf.contrib.layers.bias_add(inputs)
self.assertListEqual(output.get_shape().as_list(), input_shape)
biases = tf.contrib.framework.get_variables_by_name('biases')[-1]
self.assertListEqual(biases.get_shape().as_list(), [input_shape[-1]])
class Convolution2dTest(tf.test.TestCase):
def testCreateConv(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 4))
output = tf.contrib.layers.convolution2d(images, 32, [3, 3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = tf.contrib.framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = tf.contrib.framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateSquareConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.convolution2d(images, 32, 3)
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.convolution2d(images, 32,
images.get_shape()[1:3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateFullyConv(self):
height, width = 6, 6
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
output = tf.contrib.layers.convolution2d(images, 64,
images.get_shape()[1:3],
padding='VALID')
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
biases = tf.contrib.framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [64])
def testCreateVerticalConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 4), seed=1)
output = tf.contrib.layers.convolution2d(images, 32, [3, 1])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height, width, 32])
weights = tf.contrib.framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])
biases = tf.contrib.framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateHorizontalConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 4), seed=1)
output = tf.contrib.layers.convolution2d(images, 32, [1, 3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height, width, 32])
weights = tf.contrib.framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])
def testCreateConvWithStride(self):
height, width = 6, 6
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.convolution2d(images, 32, [3, 3], stride=2)
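      # convolution2d defaults to SAME padding, so with stride 2 the 6x6 input
      # shrinks to ceil(6 / 2) = 3 in each spatial dimension.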
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height/2, width/2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(tf.contrib.framework.get_variables('conv1/weights'))
self.assertFalse(tf.contrib.framework.get_variables('conv1/biases'))
tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertTrue(tf.contrib.framework.get_variables('conv1/weights'))
self.assertTrue(tf.contrib.framework.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.convolution2d(images, 32, [3, 3],
scope='conv1')
self.assertEquals(output.op.name, 'conv1/Relu')
def testCreateConvWithCollection(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
with tf.name_scope('fe'):
conv = tf.contrib.layers.convolution2d(images, 32, [3, 3],
outputs_collections='outputs',
scope='Conv')
output_collected = tf.get_collection('outputs')[0]
self.assertEquals(output_collected.alias, 'fe/Conv')
self.assertEquals(output_collected, conv)
def testCreateConvWithoutActivation(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.convolution2d(images, 32, [3, 3],
activation_fn=None)
self.assertEquals(output.op.name, 'Conv/BiasAdd')
def testCreateConvValid(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.convolution2d(images, 32, [3, 3],
padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
def testCreateConvWithWD(self):
height, width = 3, 3
weight_decay = 0.01
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1)
regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
tf.contrib.layers.convolution2d(images, 32, [3, 3],
weights_regularizer=regularizer)
l2_loss = tf.nn.l2_loss(
tf.contrib.framework.get_variables_by_name('weights')[0])
wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEquals(wd.op.name,
'Conv/weights/Regularizer/l2_regularizer')
sess.run(tf.initialize_all_variables())
self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval())
def testCreateConvNoRegularizers(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.convolution2d(images, 32, [3, 3])
self.assertEquals(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEquals(len(tf.contrib.framework.get_variables()), 2)
tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1',
reuse=True)
self.assertEquals(len(tf.contrib.framework.get_variables()), 2)
def testNonReuseVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.convolution2d(images, 32, [3, 3])
self.assertEquals(len(tf.contrib.framework.get_variables()), 2)
tf.contrib.layers.convolution2d(images, 32, [3, 3])
self.assertEquals(len(tf.contrib.framework.get_variables()), 4)
def testReuseConvWithWD(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
weight_decay = tf.contrib.layers.l2_regularizer(0.01)
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.convolution2d],
weights_regularizer=weight_decay):
tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEquals(len(tf.contrib.framework.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1',
reuse=True)
self.assertEquals(len(tf.contrib.framework.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testConvWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.convolution2d],
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = tf.contrib.layers.convolution2d(images, 32, [3, 3])
net = tf.contrib.layers.convolution2d(net, 32, [3, 3])
self.assertEquals(len(tf.contrib.framework.get_variables()), 8)
self.assertEquals(
len(tf.contrib.framework.get_variables('Conv/BatchNorm')), 3)
self.assertEquals(
len(tf.contrib.framework.get_variables('Conv_1/BatchNorm')), 3)
def testReuseConvWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.convolution2d],
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='Conv')
net = tf.contrib.layers.convolution2d(net, 32, [3, 3], scope='Conv',
reuse=True)
self.assertEquals(len(tf.contrib.framework.get_variables()), 4)
self.assertEquals(
len(tf.contrib.framework.get_variables('Conv/BatchNorm')), 3)
self.assertEquals(
len(tf.contrib.framework.get_variables('Conv_1/BatchNorm')), 0)
def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(tf.contrib.framework.get_variables('conv1/weights'))
self.assertFalse(tf.contrib.framework.get_variables('conv1/biases'))
tf.contrib.layers.convolution2d(images, 32, [3, 3], rate=2, scope='conv1')
self.assertTrue(tf.contrib.framework.get_variables('conv1/weights'))
self.assertTrue(tf.contrib.framework.get_variables('conv1/biases'))
def testOutputSizeWithRateTwoSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.convolution2d(images, num_filters,
[3, 3], rate=2, padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 8, num_filters]
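    # rate=2 dilates the 3x3 kernel to an effective 3 + (3 - 1) * (2 - 1) = 5,
    # so the VALID output is 10 - 5 + 1 = 6 by 12 - 5 + 1 = 8.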
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],
rate=2, padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testDynamicOutputSizeWithRateOneValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 7, 9, num_filters]
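    # With rate=1 this is a plain 3x3 VALID convolution: 9 - 3 + 1 = 7 and
    # 11 - 3 + 1 = 9.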
with self.test_session():
images = tf.placeholder(np.float32, [None, None, None, input_size[3]])
output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],
rate=1, padding='VALID')
tf.initialize_all_variables().run()
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 5, 7, num_filters]
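    # The dilated kernel is effectively 5x5 (rate 2), so 9 - 5 + 1 = 5 and
    # 11 - 5 + 1 = 7.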
with self.test_session():
images = tf.placeholder(np.float32, [None, None, None, input_size[3]])
output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],
rate=2, padding='VALID')
tf.initialize_all_variables().run()
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],
rate=2, padding='VALID',
scope='conv7')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'conv7/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],
rate=2, padding='VALID',
activation_fn=None, scope='conv7')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'conv7/BiasAdd')
self.assertListEqual(list(output.eval().shape), expected_size)
class Convolution2dTransposeTests(tf.test.TestCase):
def testOutputSizeWithStrideOneSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [3, 3], stride=1, padding='SAME')
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 12, 14, num_filters]
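    # The expected VALID size is in * stride + max(kernel - stride, 0); with
    # stride 1 and kernel 3 each dimension grows by 2: 10 -> 12, 12 -> 14.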
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [3, 3], stride=1, padding='VALID')
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
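    # Expected VALID size: in * stride + max(kernel - stride, 0), i.e.
    # 9 * 2 + 1 = 19 and 11 * 2 + 1 = 23.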
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoSamePadding(self):
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, 2, 2, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoValidPadding(self):
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, 2, 2, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoSamePadding(self):
num_filters = 1
input_size = [1, 2, 2, 1]
expected_size = [1, 4, 4, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoValidPadding(self):
num_filters = 1
input_size = [1, 2, 2, 1]
expected_size = [1, 4, 4, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x1(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 5, num_filters]
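    # Per dimension: height 3 * 2 + max(2 - 2, 0) = 6 and
    # width 2 * 1 + max(4 - 1, 0) = 5.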
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 1], padding='VALID')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x4(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 8, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 4], padding='VALID')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x5(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 10, num_filters]
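    # The width stride (5) exceeds the kernel width (4), so the
    # max(kernel - stride, 0) term vanishes: width 2 * 5 = 10, height
    # 3 * 2 + 0 = 6.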
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 5], padding='VALID')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeRandomSizesAndStridesValidPadding(self):
np.random.seed(0)
max_image_size = 10
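    # A conv2d_transpose followed by a conv2d with the same kernel, stride and
    # VALID padding should restore the original spatial shape.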
for _ in range(10):
num_filters = 1
input_size = [1, np.random.randint(1, max_image_size),
np.random.randint(1, max_image_size), 1]
filter_size = [np.random.randint(1, input_size[1] + 1),
np.random.randint(1, input_size[2] + 1)]
stride = [np.random.randint(1, 3), np.random.randint(1, 3)]
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
images = tf.random_uniform(input_size, seed=1)
transpose = tf.contrib.layers.conv2d_transpose(
images, num_filters, filter_size, stride=stride, padding='VALID')
conv = tf.contrib.layers.conv2d(
transpose, num_filters, filter_size, stride=stride, padding='VALID')
with self.test_session(graph=graph) as sess:
sess.run(tf.initialize_all_variables())
self.assertListEqual(list(conv.eval().shape), input_size)
def testDynamicOutputSizeWithStrideTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 19, 23, num_filters]
images = tf.placeholder(np.float32, [None, None, None, input_size[3]])
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), expected_size)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithStrideTwoSamePadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 18, 22, num_filters]
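    # With SAME padding conv2d_transpose scales each spatial dimension by the
    # stride: 9 * 2 = 18 and 11 * 2 = 22.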
with self.test_session():
images = tf.placeholder(np.float32, [None, None, None, input_size[3]])
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='SAME')
tf.initialize_all_variables().run()
self.assertEquals(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [3, 3], stride=2, padding='VALID', scope='conv7')
self.assertEquals(output.op.name, 'conv7/Relu')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = tf.contrib.layers.conv2d_transpose(
images, num_filters, [3, 3], stride=2, padding='VALID',
activation_fn=None, scope='conv7')
self.assertEquals(output.op.name, 'conv7/BiasAdd')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
self.assertListEqual(list(output.eval().shape), expected_size)
def testDeconvWithoutBiasesProducesConv2dTranspose(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
stride = 2
padding = 'VALID'
with self.test_session() as sess:
images = tf.random_uniform(input_size, seed=1)
output_deconv = tf.contrib.layers.conv2d_transpose(
images, num_filters, [3, 3], stride=stride, padding=padding,
activation_fn=None, scope='conv7')
weights = tf.contrib.framework.get_variables_by_name('conv7/weights')[0]
output_conv2d_transpose = tf.nn.conv2d_transpose(
images,
weights,
expected_size,
[1, stride, stride, 1],
padding=padding)
sess.run(tf.initialize_all_variables())
output_deconv, output_conv2d_transpose = sess.run(
[output_deconv, output_conv2d_transpose])
self.assertTrue(np.isclose(output_deconv,
output_conv2d_transpose, 1e-5, 1e-5).all())
class ConvolutionInPlaneTest(tf.test.TestCase):
def testHorzConvWithBlankImage(self):
image = tf.ones((1, 10, 10, 1))
horz_gradients = tf.contrib.layers.conv2d_in_plane(
image,
weights_initializer=tf.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
expected = np.zeros((1, 10, 9, 1))
self.assertAllEqual(result, expected)
def testHorzConvWithBlankImageAndPlaceholder(self):
image = tf.placeholder(tf.float32, shape=(None, None, None, 1))
horz_gradients = tf.contrib.layers.conv2d_in_plane(
image,
weights_initializer=tf.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients,
feed_dict={image: np.ones((1, 10, 10, 1))})
expected = np.zeros((1, 10, 9, 1))
self.assertAllEqual(result, expected)
def testHorzConvWithRandomImageMultiBatch(self):
np.random.seed(1)
image = np.random.rand(5, 10, 10, 1)
expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
tf_image = tf.constant(image, dtype=tf.float32)
horz_gradients = tf.contrib.layers.conv2d_in_plane(
tf_image,
weights_initializer=tf.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testHorzConvWithRandomImageMultiBatchMultiChannel(self):
np.random.seed(1)
image = np.random.rand(5, 10, 10, 7)
expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
tf_image = tf.constant(image, dtype=tf.float32)
horz_gradients = tf.contrib.layers.conv2d_in_plane(
tf_image,
weights_initializer=tf.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testHorzConvWithVaryingImage(self):
image = np.asmatrix(('1.0 2.0 3.0;'
'1.1 2.0 4.0;'
'-4.3 0.0 8.9'))
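    # The [1, -1] kernel takes left-minus-right differences along each row,
    # e.g. 1.0 - 2.0 = -1.0 and 2.0 - 3.0 = -1.0.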
expected = np.asmatrix(('-1.0 -1.0;'
'-0.9 -2.0;'
'-4.3 -8.9'))
expected = np.reshape(np.asarray(expected), (1, 3, 2, 1))
tf_image = tf.constant(image, shape=(1, 3, 3, 1), dtype=tf.float32)
horz_gradients = tf.contrib.layers.conv2d_in_plane(
tf_image,
weights_initializer=tf.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testVertConvWithBlankImage(self):
image = tf.ones((1, 10, 10, 1))
vert_gradients = tf.contrib.layers.conv2d_in_plane(
image,
weights_initializer=tf.constant_initializer([1, -1]),
kernel_size=[2, 1],
padding='VALID',
activation_fn=None)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(vert_gradients)
expected = np.zeros((1, 9, 10, 1))
self.assertAllEqual(result, expected)
def testVertConvWithVaryingImage(self):
image = np.asmatrix(('1.0 2.0 3.0;'
'1.1 2.0 4.0;'
'-4.3 0.0 8.9'))
expected = np.asmatrix(('-0.1 0.0 -1.0;'
' 5.4 2.0 -4.9'))
expected = np.reshape(np.asarray(expected), (1, 2, 3, 1))
tf_image = tf.constant(image, shape=(1, 3, 3, 1), dtype=tf.float32)
vert_gradients = tf.contrib.layers.conv2d_in_plane(
tf_image,
weights_initializer=tf.constant_initializer([1, -1]),
kernel_size=[2, 1],
padding='VALID',
activation_fn=None)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(vert_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
class DropoutTest(tf.test.TestCase):
def testCreateDropout(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = tf.contrib.layers.dropout(images)
self.assertEquals(output.op.name, 'Dropout/dropout/mul')
output.get_shape().assert_is_compatible_with(
tf.convert_to_tensor(images).get_shape())
def testCreateDropoutWithConstantTrue(self):
height, width = 3, 3
with self.test_session():
is_training = tf.constant(True)
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.dropout(images, is_training=is_training)
self.assertEquals(output.op.name, 'Dropout/dropout/mul')
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutWithConstantFalse(self):
height, width = 3, 3
with self.test_session():
is_training = tf.constant(False)
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.dropout(images, is_training=is_training)
self.assertEquals(output.op.name, 'Dropout/Identity')
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutWithPlaceholder(self):
height, width = 3, 3
with self.test_session():
is_training = tf.placeholder(dtype=tf.bool, shape=[])
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.dropout(images, is_training=is_training)
self.assertEquals(output.op.name, 'Dropout/cond/Merge')
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCollectOutputs(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.dropout(images, outputs_collections='outputs')
c_output = tf.get_collection('outputs')[0]
self.assertEquals(c_output.alias, 'Dropout')
self.assertEquals(c_output, output)
def testDropout(self):
height, width = 10, 10
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
num_elem_initial = tf.reduce_mean(tf.to_float(images > 0))
output = tf.contrib.layers.dropout(images)
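      # The default keep_prob is 0.5, so roughly half of the positive
      # activations should survive.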
num_elem = tf.reduce_mean(tf.to_float(output > 0))
sess.run(tf.initialize_all_variables())
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertLess(num_elem, num_elem_initial/2 + 0.1)
self.assertGreater(num_elem, num_elem_initial/2 - 0.1)
def testCreateDropoutNoTraining(self):
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
num_elem_initial = tf.reduce_mean(tf.to_float(images > 0))
output = tf.contrib.layers.dropout(images, is_training=False)
num_elem = tf.reduce_mean(tf.to_float(output > 0))
sess.run(tf.initialize_all_variables())
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertEquals(num_elem, num_elem_initial)
outputs, inputs = sess.run([output, images])
self.assertAllClose(outputs, inputs)
def testCreateFCFollowByDropout(self):
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = tf.contrib.layers.fully_connected(images, 50)
num_elem_initial = tf.reduce_mean(tf.to_float(output > 0))
output = tf.contrib.layers.dropout(output)
num_elem = tf.reduce_mean(tf.to_float(output > 0))
sess.run(tf.initialize_all_variables())
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertLess(num_elem, num_elem_initial/2 + 0.1)
self.assertGreater(num_elem, num_elem_initial/2 - 0.1)
def testCreateFCWithDropout(self):
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = tf.contrib.layers.fully_connected(
images, 50, normalizer_fn=tf.contrib.layers.dropout)
num_elem = tf.reduce_mean(tf.to_float(output > 0))
sess.run(tf.initialize_all_variables())
num_elem = sess.run(num_elem)
self.assertLess(num_elem, 0.5)
self.assertGreater(num_elem, 0.1)
class FlattenTest(tf.test.TestCase):
def testInvalidRank(self):
with tf.Graph().as_default() as g, self.test_session(g):
inputs = tf.placeholder(dtype=tf.float32)
inputs.set_shape(tf.TensorShape((5,)))
with self.assertRaisesRegexp(
ValueError, 'must have a least 2 dimensions'):
tf.contrib.layers.flatten(inputs)
def testUnknownLastDim(self):
with tf.Graph().as_default() as g, self.test_session(g):
inputs = tf.placeholder(dtype=tf.float32)
inputs.set_shape(tf.TensorShape((5, None)))
with self.assertRaisesRegexp(ValueError, '2nd dimension must be defined'):
tf.contrib.layers.flatten(inputs)
def testCollectOutputs(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = tf.contrib.layers.flatten(images, outputs_collections='outputs')
c_output = tf.get_collection('outputs')[0]
self.assertEquals(c_output.alias, 'Flatten')
self.assertEquals(c_output, output)
def testFlatten4D(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = tf.contrib.layers.flatten(images)
self.assertEquals(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlatten3D(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width), seed=1, name='images')
output = tf.contrib.layers.flatten(images)
self.assertEquals(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlattenBatchSize(self):
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
inputs = tf.placeholder(tf.int32, (None, height, width, 3))
output = tf.contrib.layers.flatten(inputs)
self.assertEquals(output.get_shape().as_list(),
[None, height * width * 3])
output = sess.run(output, {inputs: images.eval()})
self.assertEquals(output.size,
images.get_shape().num_elements())
self.assertEqual(output.shape[0], images.get_shape()[0])
def _sparsify(array, threshold=0.5):
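  """Zero out entries below threshold; return (indices, values, dense shape) of the result for building a SparseTensor."""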
array[array < threshold] = 0
non_zero = np.where(array)
indices = np.vstack(non_zero).T
values = array[non_zero]
shape = array.shape
return indices, values, shape
class PartialFlattenTest(tf.test.TestCase):
def testDensePartialFlatten(self):
"""Test `_inner_flatten` on `Tensor`s."""
shape = [2, 3, 4, 5, 6]
np.random.seed(5446)
inputs = np.random.randint(0, 100, size=shape)
for new_rank in [1, 2, 3, 4, 5]:
expected_new_shape = (shape[:new_rank - 1] +
[np.prod(shape[new_rank - 1:])])
expected_flattened = np.reshape(inputs, expected_new_shape)
inputs_t = tf.constant(inputs)
      flattened_t = tf.contrib.layers.python.layers._inner_flatten(
          inputs_t, new_rank)
static_shape = flattened_t.get_shape().as_list()
self.assertEqual(static_shape, expected_new_shape)
with self.test_session() as sess:
flattened = sess.run(flattened_t)
np.testing.assert_array_equal(expected_flattened, flattened)
def testSparsePartialFlatten(self):
"""Test `_inner_flatten` on `SparseTensor`s."""
shape = [4, 3, 11, 6, 1, 3]
np.random.seed(10301)
random_ = np.random.rand(*shape)
indices, values, _ = _sparsify(random_)
for new_rank in [1, 2, 3, 4, 5]:
expected_shape = (shape[:new_rank - 1] + [np.prod(shape[new_rank - 1:])])
reshaped_random_ = np.reshape(random_, expected_shape)
expected_indices, expected_values, _ = _sparsify(reshaped_random_)
inputs_t = tf.SparseTensor(indices, values, shape)
flattened_t = tf.contrib.layers.python.layers._inner_flatten(
inputs_t, new_rank)
with self.test_session() as sess:
flattened = sess.run(flattened_t)
np.testing.assert_array_equal(expected_indices, flattened.indices)
np.testing.assert_array_equal(expected_values, flattened.values)
np.testing.assert_array_equal(expected_shape, flattened.shape)
def testIncompleteShape(self):
"""Test `_inner_flatten` shape inference for incomplete shapes."""
shape = [2, None, 4, None, 5, 6]
inputs = tf.placeholder(tf.int32)
inputs.set_shape(shape)
flattened1 = tf.contrib.layers.python.layers._inner_flatten(inputs, 1)
self.assertEquals([None], flattened1.get_shape().as_list())
flattened2 = tf.contrib.layers.python.layers._inner_flatten(inputs, 2)
self.assertEquals([2, None], flattened2.get_shape().as_list())
flattened3 = tf.contrib.layers.python.layers._inner_flatten(inputs, 3)
self.assertEquals([2, None, None], flattened3.get_shape().as_list())
flattened4 = tf.contrib.layers.python.layers._inner_flatten(inputs, 4)
self.assertEquals([2, None, 4, None], flattened4.get_shape().as_list())
flattened5 = tf.contrib.layers.python.layers._inner_flatten(inputs, 5)
self.assertEquals([2, None, 4, None, 30], flattened5.get_shape().as_list())
class FCTest(tf.test.TestCase):
def testCreateFC(self):
height, width = 3, 3
for layer_fn in (tf.contrib.layers.fully_connected, tf.contrib.layers.relu):
with tf.Graph().as_default() as g, self.test_session(g):
inputs = np.random.uniform(size=(5, height * width * 3))
output = layer_fn(inputs, 32)
self.assertEquals(output.op.name, 'fully_connected/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32])
weights = tf.contrib.framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3 * 3 * 3, 32])
biases = tf.contrib.framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateFCWithScope(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
output = tf.contrib.layers.fully_connected(inputs, 32, scope='fc1')
self.assertEquals(output.op.name, 'fc1/Relu')
def testCreateFCWithCollection(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with tf.name_scope('fe'):
fc = tf.contrib.layers.fully_connected(inputs, 7,
outputs_collections='outputs',
scope='fc')
output_collected = tf.get_collection('outputs')[0]
self.assertEquals(output_collected.alias, 'fe/fc')
self.assertEquals(output_collected, fc)
def testCreateFcCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
self.assertFalse(tf.contrib.framework.get_variables('fc1/weights'))
self.assertFalse(tf.contrib.framework.get_variables('fc1/biases'))
tf.contrib.layers.fully_connected(inputs, 32, scope='fc1')
self.assertTrue(tf.contrib.framework.get_variables('fc1/weights'))
self.assertTrue(tf.contrib.framework.get_variables('fc1/biases'))
def testReuseVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
tf.contrib.layers.fully_connected(inputs, 32, scope='fc1')
self.assertEquals(len(tf.contrib.framework.get_variables('fc1')), 2)
tf.contrib.layers.fully_connected(inputs, 32, scope='fc1', reuse=True)
self.assertEquals(len(tf.contrib.framework.get_variables('fc1')), 2)
def testNonReuseVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
tf.contrib.layers.fully_connected(inputs, 32)
self.assertEquals(
len(tf.contrib.framework.get_variables('fully_connected')), 2)
tf.contrib.layers.fully_connected(inputs, 32)
self.assertEquals(
len(tf.contrib.framework.get_variables('fully_connected')), 4)
def testCreateFCWithoutActivation(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
output = tf.contrib.layers.fully_connected(inputs, 32, activation_fn=None)
self.assertEquals(output.op.name, 'fully_connected/BiasAdd')
def testCreateFCWithWD(self):
height, width = 3, 3
with self.test_session() as sess:
inputs = tf.random_uniform((5, height * width * 3), seed=1)
weight_decay = tf.contrib.layers.l2_regularizer(0.01)
tf.contrib.layers.fully_connected(inputs, 32,
weights_regularizer=weight_decay)
wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEquals(wd.op.name,
'fully_connected/weights/Regularizer/l2_regularizer')
sess.run(tf.initialize_all_variables())
self.assertLess(sess.run(wd), 0.4)
def testCreateNoRegularizers(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
tf.contrib.layers.fully_connected(inputs, 32)
self.assertEquals(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseFCWithWD(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
weight_decay = tf.contrib.layers.l2_regularizer(0.01)
tf.contrib.layers.fully_connected(inputs, 32,
weights_regularizer=weight_decay,
scope='FC')
self.assertEquals(len(tf.contrib.framework.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
tf.contrib.layers.fully_connected(inputs, 32,
weights_regularizer=weight_decay,
scope='FC',
reuse=True)
self.assertEquals(len(tf.contrib.framework.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testFCWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height * width * 3), seed=1)
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.fully_connected],
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = tf.contrib.layers.fully_connected(images, 27)
net = tf.contrib.layers.fully_connected(net, 27)
self.assertEquals(len(tf.contrib.framework.get_variables()), 8)
self.assertEquals(len(tf.contrib.framework.get_variables(
'fully_connected/BatchNorm')), 3)
self.assertEquals(len(tf.contrib.framework.get_variables(
'fully_connected_1/BatchNorm')), 3)
def testReuseFCWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height * width * 3), seed=1)
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.fully_connected],
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = tf.contrib.layers.fully_connected(images, 27, scope='fc1')
net = tf.contrib.layers.fully_connected(net, 27, scope='fc1',
reuse=True)
self.assertEquals(len(tf.contrib.framework.get_variables()), 4)
self.assertEquals(
len(tf.contrib.framework.get_variables('fc1/BatchNorm')), 3)
class BatchNormTest(tf.test.TestCase):
def testUnknownShape(self):
with tf.Graph().as_default() as g, self.test_session(g):
inputs = tf.placeholder(dtype=tf.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
tf.contrib.layers.batch_norm(inputs)
def testUnknownLastDim(self):
with tf.Graph().as_default() as g, self.test_session(g):
inputs = tf.placeholder(dtype=tf.float32)
inputs.set_shape(tf.TensorShape((5, 3, 3, None)))
with self.assertRaisesRegexp(ValueError, 'undefined last dimension'):
tf.contrib.layers.batch_norm(inputs)
def testCreateOp(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = tf.contrib.layers.batch_norm(images)
self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm'))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.batch_norm(images, scale=True)
beta = tf.contrib.framework.get_variables_by_name('beta')[0]
gamma = tf.contrib.framework.get_variables_by_name('gamma')[0]
self.assertEquals(beta.op.name, 'BatchNorm/beta')
self.assertEquals(gamma.op.name, 'BatchNorm/gamma')
moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')[0]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.batch_norm(images, scale=True)
moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')[0]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testUpdatesCollection(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.batch_norm(images, updates_collections='my_update_ops')
update_layers = tf.get_collection('my_update_ops')
update_moving_mean = update_layers[0]
update_moving_variance = update_layers[1]
self.assertEquals(update_moving_mean.op.name,
'BatchNorm/AssignMovingAvg')
self.assertEquals(update_moving_variance.op.name,
'BatchNorm/AssignMovingAvg_1')
def testReuseVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.batch_norm(images, scale=True, scope='bn')
tf.contrib.layers.batch_norm(images, scale=True, scope='bn', reuse=True)
beta = tf.contrib.framework.get_variables_by_name('beta')
gamma = tf.contrib.framework.get_variables_by_name('gamma')
self.assertEquals(len(beta), 1)
self.assertEquals(len(gamma), 1)
moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')
moving_vars = moving_mean + moving_variance
self.assertEquals(len(moving_vars), 2)
def testReuseUpdateOps(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
with tf.contrib.framework.arg_scope([tf.contrib.layers.batch_norm],
updates_collections='update_ops'):
tf.contrib.layers.batch_norm(images, scope='bn')
self.assertEquals(len(tf.get_collection('update_ops')), 2)
tf.contrib.layers.batch_norm(images, scope='bn', reuse=True)
self.assertEquals(len(tf.get_collection('update_ops')), 4)
def testCreateMovingVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
_ = tf.contrib.layers.batch_norm(images)
moving_mean = tf.contrib.framework.get_variables('BatchNorm/moving_mean')
self.assertEquals(len(moving_mean), 1)
self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean')
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')
self.assertEquals(len(moving_variance), 1)
self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance')
def testNoneUpdatesCollections(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = tf.contrib.layers.batch_norm(images, decay=0.1,
updates_collections=None)
      # update_ops are not added to the UPDATE_OPS collection.
self.assertEquals(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BatchNorm/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
for _ in range(10):
sess.run([output])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
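      # (Each update keeps only decay=0.1 of the old moving value, so after 10
      # updates the initial 0/1 contribute roughly 0.1**10.)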
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testDelayedUpdateMovingVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = tf.contrib.layers.batch_norm(images, decay=0.1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      # update_ops are added to the UPDATE_OPS collection.
self.assertEquals(len(update_ops), 2)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='barrier')
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BatchNorm/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
for _ in range(10):
sess.run([output])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEvalMovingVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = tf.contrib.layers.batch_norm(images,
decay=0.1,
is_training=False)
self.assertEquals(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BatchNorm/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
      # Simulate assignment from a saver restore.
init_assigns = [tf.assign(moving_mean, expected_mean),
tf.assign(moving_variance, expected_var)]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testReuseVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output_train = tf.contrib.layers.batch_norm(images,
decay=0.1,
is_training=True,
scope='BN')
output_eval = tf.contrib.layers.batch_norm(images,
decay=0.1,
is_training=False,
scope='BN',
reuse=True)
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BN/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BN/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='barrier')
train_op = control_flow_ops.with_dependencies([barrier], output_train)
# Before updates the outputs are different for train and eval.
self.assertFalse(np.allclose(sess.run([output_train]),
sess.run([output_eval])))
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
# After convergence output_train and output_eval should be the same.
self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))
def testIsTrainingVariable(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
is_training = tf.Variable(True)
output = tf.contrib.layers.batch_norm(images,
decay=0.1,
is_training=is_training)
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BatchNorm/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
      # Before updates, the outputs differ depending on is_training.
output_true = sess.run([output], {is_training: True})
output_false = sess.run([output], {is_training: False})
self.assertFalse(np.allclose(output_true, output_false))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='barrier')
train_op = control_flow_ops.with_dependencies([barrier], output)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
      # Once the updates have converged, the outputs no longer depend on is_training.
output_true = sess.run([output], {is_training: True})
output_false = sess.run([output], {is_training: False})
self.assertAllClose(output_true, output_false)
def testNoUpdatesWhenIsTrainingFalse(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = tf.contrib.layers.batch_norm(images,
decay=0.1,
is_training=False)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      # update_ops are not added to the UPDATE_OPS collection.
self.assertEquals(len(update_ops), 0)
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BatchNorm/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output])
self.assertAllClose(moving_mean.eval(), [0] * 3)
self.assertAllClose(moving_variance.eval(), [1] * 3)
def testNoneUpdatesCollectionNoTraining(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = tf.contrib.layers.batch_norm(images,
decay=0.1,
updates_collections=None,
is_training=False)
      # update_ops are not added to the UPDATE_OPS collection.
self.assertEquals(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BatchNorm/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output])
self.assertAllClose(moving_mean.eval(), [0] * 3)
self.assertAllClose(moving_variance.eval(), [1] * 3)
def testNoneUpdatesCollectionIsTrainingVariable(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
is_training = tf.Variable(True)
output = tf.contrib.layers.batch_norm(images,
decay=0.1,
updates_collections=None,
is_training=is_training)
      # update_ops are not added to the UPDATE_OPS collection.
self.assertEquals(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BatchNorm/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output], {is_training: False})
self.assertAllClose(moving_mean.eval(), [0] * 3)
self.assertAllClose(moving_variance.eval(), [1] * 3)
      # Before updates, the outputs differ depending on is_training.
output_true = sess.run([output], {is_training: True})
output_false = sess.run([output], {is_training: False})
self.assertFalse(np.allclose(output_true, output_false))
# When is_training is True update moving_vars.
for _ in range(10):
sess.run([output], {is_training: True})
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(moving_mean.eval(), expected_mean)
self.assertAllClose(moving_variance.eval(), expected_var)
      # Once the updates have converged, the outputs no longer depend on is_training.
output_true = sess.run([output], {is_training: True})
output_false = sess.run([output], {is_training: False})
self.assertTrue(np.allclose(output_true, output_false))
def testTrainMovingVars(self):
"""Test that the gradients are stable while the moving_mean is updated.
    Since the moving_mean is used as the shift when computing tf.moments, the
    gradients could diverge; this test checks that the gradients remain stable
    while the moving_mean is updated.
"""
height, width = 7, 7
num_channels = 32
with self.test_session() as sess:
image_shape = (10, height, width, num_channels)
image_values = np.random.rand(*image_shape) + 2
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = tf.contrib.layers.batch_norm(images,
decay=0.2,
updates_collections=None,
is_training=True)
self.assertEquals(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])
objective = tf.reduce_sum(output)
[images_gradients] = tf.gradients(objective, images)
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = tf.contrib.framework.get_variables(
'BatchNorm/moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables(
'BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * num_channels)
self.assertAllClose(variance, [1] * num_channels)
# Initial input gradients.
images_gradients_value = sess.run(images_gradients)
for _ in range(10):
np_output, new_images_gradients = sess.run([output, images_gradients])
# The outputs should be close to 0.0 mean and 1.0 variance
self.assertAllClose(np.mean(np_output, axis=(0, 1, 2)),
[0] * num_channels, rtol=0.1, atol=0.1)
self.assertAllClose(np.var(np_output, axis=(0, 1, 2)),
[1] * num_channels, rtol=0.1, atol=0.1)
# The gradients should change slowly while updating moving_mean.
max_diff = np.max(np.abs(images_gradients_value - new_images_gradients))
self.assertGreater(max_diff, 0.0)
self.assertLess(max_diff, 5e-5)
self.assertAllClose(moving_mean.eval(), expected_mean)
self.assertAllClose(moving_variance.eval(), expected_var)
def testCustomInitializer(self):
height, width = 3, 3
channels = 3
with self.test_session() as sess:
images = np.ones((5, height, width, channels))*9.0
beta = tf.constant_initializer(np.ones(channels)*5.0)
gamma = tf.constant_initializer(np.ones(channels)*2.0)
mean = tf.constant_initializer(np.ones(channels)*5.0)
variance = tf.constant_initializer(np.ones(channels)*4.0)
output = tf.contrib.layers.batch_norm(images,
is_training=False,
scale=True,
epsilon=0.0,
initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
})
sess.run(tf.initialize_all_variables())
outs = sess.run(output)
self.assertAllClose(outs, images)
class LayerNormTest(tf.test.TestCase):
def testUnknownShape(self):
with tf.Graph().as_default() as g, self.test_session(g):
inputs = tf.placeholder(dtype=tf.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
tf.contrib.layers.layer_norm(inputs)
def testUnknownLastDim(self):
with tf.Graph().as_default() as g, self.test_session(g):
inputs = tf.placeholder(dtype=tf.float32)
inputs.set_shape(tf.TensorShape((5, 3, 3, None)))
with self.assertRaisesRegexp(ValueError, 'undefined last dimension'):
tf.contrib.layers.layer_norm(inputs)
def testCreateOp(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = tf.contrib.layers.layer_norm(images)
self.assertTrue(output.op.name.startswith('LayerNorm/batchnorm'))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.layer_norm(images)
beta = tf.contrib.framework.get_variables_by_name('beta')[0]
gamma = tf.contrib.framework.get_variables_by_name('gamma')[0]
self.assertEquals(beta.op.name, 'LayerNorm/beta')
self.assertEquals(gamma.op.name, 'LayerNorm/gamma')
def testReuseVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
tf.contrib.layers.layer_norm(images, scope='ln')
tf.contrib.layers.layer_norm(images, scope='ln', reuse=True)
beta = tf.contrib.framework.get_variables_by_name('beta')
gamma = tf.contrib.framework.get_variables_by_name('gamma')
self.assertEquals(len(beta), 1)
self.assertEquals(len(gamma), 1)
def testReuseVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output_train = tf.contrib.layers.layer_norm(images, scope='LN')
output_eval = tf.contrib.layers.layer_norm(images,
scope='LN',
reuse=True)
# Initialize all variables
sess.run(tf.initialize_all_variables())
# output_train and output_eval should be the same.
self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))
def doOutputTest(self, input_shape):
with self.test_session() as sess:
input_values = np.random.rand(*input_shape)
inputs = tf.constant(input_values, shape=input_shape, dtype=tf.float32)
output_op = tf.contrib.layers.layer_norm(inputs, scope='LN')
# Initialize all variables
sess.run(tf.initialize_all_variables())
# The mean and variance of the output should be close to 0 and 1
# respectively.
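# (layer_norm standardizes each example over all of its non-batch dimensions,
#  so the per-example moments computed below should land near 0 and 1.)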
moments_axis = tuple([i for i in range(1, len(input_shape))])
outputs = sess.run(output_op)
expected_mean = np.zeros(input_shape[0])
expected_var = np.ones(input_shape[0])
mean = np.mean(outputs, axis=moments_axis)
var = np.var(outputs, axis=moments_axis)
tol = 1e-5
self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
self.assertAllClose(var, expected_var, rtol=tol, atol=tol)
def testOutput2DInput(self):
self.doOutputTest((10, 300))
def testOutput4DInput(self):
self.doOutputTest((100, 10, 10, 3))
class MaxPool2DTest(tf.test.TestCase):
def testCreateMaxPool(self):
height, width = 3, 3
images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)
output = tf.contrib.layers.max_pool2d(images, [3, 3])
self.assertEquals(output.op.name, 'MaxPool2D/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCollectOutputs(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.max_pool2d(images, [3, 3],
outputs_collections='outputs')
output_collected = tf.get_collection('outputs')[0]
self.assertEquals(output_collected.alias, 'MaxPool2D')
self.assertEquals(output_collected, output)
def testCreateSquareMaxPool(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.max_pool2d(images, 3)
self.assertEquals(output.op.name, 'MaxPool2D/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateMaxPoolWithScope(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.max_pool2d(images, [3, 3], scope='pool1')
self.assertEquals(output.op.name, 'pool1/MaxPool')
def testCreateMaxPoolWithSamePadding(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.max_pool2d(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])
def testCreateMaxPoolStrideWithSamePadding(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.max_pool2d(images, [3, 3], stride=1,
padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalMaxPool(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.max_pool2d(images, images.get_shape()[1:3],
stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class OneHotEncodingTest(tf.test.TestCase):
def testOneHotEncodingCreate(self):
with self.test_session():
labels = np.array([0, 1, 2])
output = tf.contrib.layers.one_hot_encoding(labels, num_classes=3)
self.assertEquals(output.op.name, 'OneHotEncoding/one_hot')
self.assertListEqual(output.get_shape().as_list(), [3, 3])
def testCollectOutputs(self):
with self.test_session():
labels = tf.constant([0, 1, 2])
output = tf.contrib.layers.one_hot_encoding(labels, num_classes=3,
outputs_collections='outputs')
c_output = tf.get_collection('outputs')[0]
self.assertEquals(c_output.alias, 'OneHotEncoding')
self.assertEquals(c_output, output)
def testOneHotEncoding(self):
with self.test_session():
labels = tf.constant([0, 1, 2])
one_hot_labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
output = tf.contrib.layers.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
def testOneHotEncodingInt32(self):
with self.test_session():
labels = tf.constant([0, 1, 2], dtype=tf.int32)
one_hot_labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
output = tf.contrib.layers.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
class RepeatTests(tf.test.TestCase):
def testRepeat(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = tf.contrib.layers.repeat(images, 3,
tf.contrib.layers.conv2d, 32, [3, 3])
self.assertEquals(output.op.name, 'Repeat/convolution2d_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
def testRepeatWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = tf.contrib.layers.repeat(images, 3,
tf.contrib.layers.conv2d, 32, [3, 3],
scope='conv1')
self.assertEquals(output.op.name, 'conv1/conv1_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
class SeparableConv2dTest(tf.test.TestCase):
def testCreateConvInt32(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform(
(5, height, width, 3), seed=1, dtype=tf.int32, maxval=12345)
with self.assertRaisesRegexp(TypeError, 'non-floating point type'):
tf.contrib.layers.separable_conv2d(images, 32, [3, 3], 2)
def testCreateConvFloat32(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform(
(5, height, width, 3), seed=1, dtype=tf.float32)
output = tf.contrib.layers.separable_conv2d(images, 32, [3, 3], 2)
self.assertEquals(output.op.name, 'SeparableConv2d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvFloat64(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform(
(5, height, width, 3), seed=1, dtype=tf.float64)
output = tf.contrib.layers.separable_conv2d(images, 32, [3, 3], 2)
self.assertEquals(output.op.name, 'SeparableConv2d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateDepthwiseConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.separable_conv2d(images, None, [3, 3], 2)
self.assertEquals(output.op.name, 'SeparableConv2d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 6])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(
tf.contrib.framework.get_variables('conv1/depthwise_weights'))
self.assertFalse(
tf.contrib.framework.get_variables('conv1/pointwise_weights'))
self.assertFalse(tf.contrib.framework.get_variables('conv1/biases'))
tf.contrib.layers.separable_conv2d(images, 32, [3, 3], 4, scope='conv1')
self.assertTrue(
tf.contrib.framework.get_variables('conv1/depthwise_weights'))
self.assertTrue(
tf.contrib.framework.get_variables('conv1/pointwise_weights'))
self.assertTrue(tf.contrib.framework.get_variables('conv1/biases'))
def testCreateDepthwiseConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(
tf.contrib.framework.get_variables('conv1/depthwise_weights'))
self.assertFalse(
tf.contrib.framework.get_variables('conv1/pointwise_weights'))
self.assertFalse(tf.contrib.framework.get_variables('conv1/biases'))
tf.contrib.layers.separable_conv2d(images, None, [3, 3], 4, scope='conv1')
self.assertTrue(
tf.contrib.framework.get_variables('conv1/depthwise_weights'))
self.assertFalse(
tf.contrib.framework.get_variables('conv1/pointwise_weights'))
self.assertTrue(tf.contrib.framework.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.separable_conv2d(
images, 32, [3, 3], 6, scope='conv1')
self.assertEquals(output.op.name, 'conv1/Relu')
def testCreateConvWithoutActivation(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.separable_conv2d(
images, 32, [3, 3], 8, activation_fn=None)
self.assertEquals(output.op.name, 'SeparableConv2d/BiasAdd')
def testCreateConvValid(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.separable_conv2d(
images, 32, [3, 3], 2, padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
def testCreateDepthwiseConvValid(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = tf.contrib.layers.separable_conv2d(
images, None, [3, 3], 2, padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 6])
def testCreateConvWithWeightDecay(self):
tf.set_random_seed(0)
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1)
regularizer = tf.contrib.layers.l2_regularizer(0.01)
tf.contrib.layers.separable_conv2d(
images, 32, [3, 3], 2, weights_regularizer=regularizer)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 2)
weight_decay = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEquals(
weight_decay.op.name,
'SeparableConv2d/depthwise_weights/Regularizer/l2_regularizer')
sess.run(tf.initialize_all_variables())
self.assertLessEqual(sess.run(weight_decay), 0.05)
weight_decay = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[1]
self.assertEquals(
weight_decay.op.name,
'SeparableConv2d/pointwise_weights/Regularizer/l2_regularizer')
self.assertLessEqual(sess.run(weight_decay), 0.05)
def testReuseConvWithWeightDecay(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
regularizer = tf.contrib.layers.l2_regularizer(0.01)
tf.contrib.layers.separable_conv2d(
images, 32, [3, 3], 2,
weights_regularizer=regularizer,
scope='conv1')
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 2)
tf.contrib.layers.separable_conv2d(
images, 32, [3, 3], 2,
weights_regularizer=regularizer,
scope='conv1', reuse=True)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 2)
def testConvWithBatchNorm(self):
height, width = 3, 3
batch_norm_collection = 'moving_vars'
normalizer_params = {
'variables_collections': {
'beta': [batch_norm_collection],
'gamma': [batch_norm_collection],
'moving_mean': [batch_norm_collection],
'moving_variance': [batch_norm_collection],
}
}
images = tf.random_uniform((5, height, width, 3), seed=1)
net = tf.contrib.layers.separable_conv2d(
images, 8, [3, 3], 2,
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params=normalizer_params,
scope='conv1')
net = tf.contrib.layers.separable_conv2d(
net, 32, [3, 3], 2,
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params=normalizer_params,
scope='conv2')
self.assertEquals(len(tf.get_collection(batch_norm_collection)), 6)
self.assertEquals(
len(tf.contrib.framework.get_variables('conv1/BatchNorm')), 3)
self.assertEquals(
len(tf.contrib.framework.get_variables('conv2/BatchNorm')), 3)
def testConvWithInputsViaPlaceHolder(self):
height, width = 3, 3
images_placeholder = tf.placeholder(tf.float32, shape=(None, None, None, 3))
net = tf.contrib.layers.separable_conv2d(
images_placeholder, 8, [3, 3], 2,
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={},
scope='conv1')
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
images = np.random.rand(5, height, width, 3)
sess.run(init_op)
sess.run(net, feed_dict={images_placeholder: images})
class SoftmaxTests(tf.test.TestCase):
def setUp(self):
self.low = 1 / (1 + math.e)
self.high = math.e / (1 + math.e)
def testSoftmax2D(self):
logits = tf.constant([[0.0, 1], [1, 1], [1, 0]])
prediction = tf.contrib.layers.softmax(logits)
exp_prediction = np.array([[self.low, self.high],
[0.5, 0.5],
[self.high, self.low]])
with self.test_session() as sess:
prediction = sess.run(prediction)
self.assertAllClose(exp_prediction, prediction)
def testSoftmax3D(self):
logits = np.ones((2, 3, 2))
logits[0, 0, 0] = 0
logits[1, 1, 1] = 0
logits = tf.constant(logits)
exp_prediction = 0.5 * np.ones((2, 3, 2))
exp_prediction[0, 0, 0] = self.low
exp_prediction[0, 0, 1] = self.high
exp_prediction[1, 1, 0] = self.high
exp_prediction[1, 1, 1] = self.low
prediction = tf.contrib.layers.softmax(logits)
with self.test_session() as sess:
prediction = sess.run(prediction)
self.assertAllClose(exp_prediction, prediction)
def testSoftmax3DUnknownSize(self):
logits = np.ones((2, 3, 2))
logits[0, 0, 0] = 0
logits[1, 1, 1] = 0
logit_placeholder = tf.placeholder(tf.float32, shape=(None, None, 2))
feed_dict = {logit_placeholder: logits}
exp_prediction = 0.5 * np.ones((2, 3, 2))
exp_prediction[0, 0, 0] = self.low
exp_prediction[0, 0, 1] = self.high
exp_prediction[1, 1, 0] = self.high
exp_prediction[1, 1, 1] = self.low
prediction = tf.contrib.layers.softmax(logit_placeholder)
with self.test_session() as sess:
prediction = sess.run(prediction, feed_dict=feed_dict)
self.assertAllClose(exp_prediction, prediction)
def testSoftmaxUndefinedNthDimension(self):
logits = tf.placeholder(tf.float32)
with self.assertRaises(ValueError):
tf.contrib.layers.softmax(logits)
class StackTests(tf.test.TestCase):
def testStackFullyConnected(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height * width * 3))
output = tf.contrib.layers.stack(images,
tf.contrib.layers.fully_connected,
[10, 20, 30])
self.assertEquals(output.op.name, 'Stack/fully_connected_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 30])
def testStackRelu(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height * width * 3), seed=1, name='images')
output = tf.contrib.layers.stack(images,
tf.contrib.layers.relu,
[10, 20, 30])
self.assertEquals(output.op.name, 'Stack/fully_connected_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 30])
def testStackConvolution2d(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = tf.contrib.layers.stack(images,
tf.contrib.layers.convolution2d,
[10, 20, 30],
kernel_size=[3, 3],
padding='SAME')
self.assertEquals(output.op.name, 'Stack/convolution2d_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
def testStackWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = tf.contrib.layers.stack(images,
tf.contrib.layers.convolution2d,
[10, 20, 30],
kernel_size=[3, 3],
padding='SAME',
scope='conv1')
self.assertEquals(output.op.name, 'conv1/conv1_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
class UnitNormTests(tf.test.TestCase):
def testUnitNormWithRandomMatrix(self):
height, width = 2, 3
for dim in range(3):
tf.set_random_seed(0)
image = tf.random_uniform((height, width, 3))
output = tf.contrib.layers.unit_norm(image, dim=dim, epsilon=1e-6)
norms = tf.sqrt(tf.reduce_sum(tf.square(output), reduction_indices=dim))
shape = [height, width, 3]
del shape[dim]
expected = np.ones(shape)
with self.test_session():
actual = norms.eval()
self.assertAllClose(expected, actual, 1e-4, 1e-4)
def testDimEqualToRankRaisesError(self):
height, width = 2, 3
tf.set_random_seed(0)
image = tf.random_uniform((height, width, 3))
with self.assertRaises(ValueError):
tf.contrib.layers.unit_norm(image, dim=3, epsilon=1e-6)
def testUnknownRankRaisesError(self):
image = tf.placeholder(tf.float32)
with self.assertRaises(ValueError):
tf.contrib.layers.unit_norm(image, dim=2)
def testKnownRankUnknownDimsSucceeds(self):
height, width = 2, 3
for dim in range(3):
placeholder_value = np.ones((height, width, 3))
shape = [height, width, 3]
del shape[dim]
expected = np.ones(shape)
image = tf.placeholder(tf.float32, (None, None, 3))
output = tf.contrib.layers.unit_norm(image, dim=dim, epsilon=1e-6)
norms = tf.sqrt(tf.reduce_sum(tf.square(output), reduction_indices=dim))
with self.test_session():
actual = norms.eval({image: placeholder_value})
self.assertAllClose(expected, actual, 1e-4, 1e-4)
# TODO(b/28426988): Add separate tests for non-legacy versions.
class LegacyFullyConnectedTest(tf.test.TestCase):
def setUp(self):
tf.test.TestCase.setUp(self)
tf.set_random_seed(1234)
self.input = tf.constant([[1., 2., 3.], [-4., 15., -6.]])
self.input_3_dim_arr = [[[1., 1.1, 1.2],
[2., 2.1, 2.2],
[3., 3.1, 3.2],
[4., 4.1, 4.2]],
[[5., 5.1, 5.2],
[6., 6.1, 6.2],
[7., 7.1, 7.2],
[8., 8.1, 8.2]]]
self.input_3_dim = tf.constant(self.input_3_dim_arr)
assert not tf.get_collection(tf.GraphKeys.SUMMARIES)
def _fully_connected_basic_use(self, x, num_output_units, expected_shape):
output = tf.contrib.layers.legacy_fully_connected(x,
num_output_units,
activation_fn=tf.nn.relu)
with tf.Session() as sess:
with self.assertRaises(tf.errors.FailedPreconditionError):
sess.run(output)
tf.initialize_all_variables().run()
out_value, shape_value = sess.run([output, tf.shape(output)])
self.assertAllClose(shape_value, expected_shape)
self.assertEquals(output.get_shape().as_list(), expected_shape)
self.assertTrue(np.all(out_value >= 0),
'Relu should have all values >= 0.')
self.assertEqual(2,
len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
self.assertEqual(0,
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))
def test_fully_connected_basic_use(self):
self._fully_connected_basic_use(self.input, 8, [2, 8])
def test_fully_connected_basic_use_multi_dim(self):
for last_dim in [1, 3]:
self.setUp()
self._fully_connected_basic_use(
self.input_3_dim, last_dim, [2, 4, last_dim])
def test_relu_layer_basic_use(self):
output = tf.contrib.layers.legacy_relu(self.input, 8)
with tf.Session() as sess:
with self.assertRaises(tf.errors.FailedPreconditionError):
sess.run(output)
tf.initialize_all_variables().run()
out_value = sess.run(output)
self.assertEqual(output.get_shape().as_list(), [2, 8])
self.assertTrue(np.all(out_value >= 0),
'Relu should have all values >= 0.')
self.assertEqual(2,
len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
self.assertEqual(0,
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))
def test_variable_reuse_with_scope(self):
with tf.variable_scope('test') as vs:
output1 = tf.contrib.layers.legacy_relu(self.input, 8)
output2 = tf.contrib.layers.legacy_relu(self.input, 8)
with tf.variable_scope(vs, reuse=True):
output3 = tf.contrib.layers.legacy_relu(self.input, 8)
with tf.Session() as sess:
tf.initialize_all_variables().run()
out_value1, out_value2, out_value3 = sess.run([output1, output2, output3])
self.assertFalse(np.allclose(out_value1, out_value2))
self.assertAllClose(out_value1, out_value3)
def test_variable_reuse_with_template(self):
tmpl1 = tf.make_template('test',
tf.contrib.layers.legacy_fully_connected,
num_output_units=8)
output1 = tmpl1(self.input)
output2 = tmpl1(self.input)
with tf.Session() as sess:
tf.initialize_all_variables().run()
out_value1, out_value2 = sess.run([output1, output2])
self.assertAllClose(out_value1, out_value2)
def _custom_initializers(self, x, num_output_units, expected_outputs):
output = tf.contrib.layers.legacy_relu(
x,
num_output_units,
weight_init=tf.constant_initializer(2.0),
bias_init=tf.constant_initializer(1.0))
with tf.Session() as sess:
tf.initialize_all_variables().run()
out_value = sess.run(output)
self.assertAllClose(np.array(expected_outputs), out_value)
def test_custom_initializers(self):
self._custom_initializers(
self.input, 2, [[13.0, 13.0], [11.0, 11.0]])
def test_custom_initializers_multi_dim(self):
self._custom_initializers(self.input_3_dim,
2,
[[[7.6, 7.6],
[13.6, 13.6],
[19.6, 19.6],
[25.6, 25.6]],
[[31.6, 31.6],
[37.6, 37.6],
[43.6, 43.6],
[49.6, 49.6]]])
def test_custom_collections(self):
tf.contrib.layers.legacy_relu(self.input,
2,
weight_collections=['unbiased'],
bias_collections=['biased'],
output_collections=['output'])
self.assertEquals(1, len(tf.get_collection('unbiased')))
self.assertEquals(1, len(tf.get_collection('biased')))
self.assertEquals(1, len(tf.get_collection('output')))
self.assertEquals(2, len(tf.get_collection(tf.GraphKeys.VARIABLES)))
def test_all_custom_collections(self):
tf.contrib.layers.legacy_relu(self.input,
2,
weight_collections=['unbiased', 'all'],
bias_collections=['biased', 'all'])
self.assertEquals(1, len(tf.get_collection('unbiased')))
self.assertEquals(1, len(tf.get_collection('biased')))
self.assertEquals(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),
tf.get_collection('all'))
def test_no_bias(self):
tf.contrib.layers.legacy_relu(self.input, 2, bias_init=None)
self.assertEqual(1, len(tf.get_collection(tf.GraphKeys.VARIABLES)))
def test_no_activation(self):
y = tf.contrib.layers.legacy_fully_connected(self.input, 2)
self.assertEquals(2, len(tf.get_collection(tf.GraphKeys.VARIABLES)))
self.assertEquals('BiasAdd', y.op.type)
def test_no_activation_no_bias(self):
y = tf.contrib.layers.legacy_fully_connected(self.input, 2, bias_init=None)
self.assertEquals(1, len(tf.get_collection(tf.GraphKeys.VARIABLES)))
self.assertEquals('MatMul', y.op.type)
def test_regularizer(self):
cnt = [0]
tensor = tf.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
tf.contrib.layers.legacy_fully_connected(self.input,
2,
weight_regularizer=test_fn)
self.assertEqual([tensor],
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(1, cnt[0])
def test_regularizer_with_multiple_variables(self):
cnt = [0]
tensor = tf.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
tf.contrib.layers.legacy_fully_connected(self.input,
2,
weight_regularizer=test_fn)
tf.contrib.layers.legacy_fully_connected(self.input,
2,
weight_regularizer=test_fn)
self.assertEqual([tensor, tensor],
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(2, cnt[0])
def test_regularizer_with_variable_reuse(self):
cnt = [0]
tensor = tf.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
with tf.variable_scope('test') as vs:
tf.contrib.layers.legacy_fully_connected(self.input,
2,
weight_regularizer=test_fn)
with tf.variable_scope(vs, reuse=True):
tf.contrib.layers.legacy_fully_connected(self.input,
2,
weight_regularizer=test_fn)
self.assertEqual([tensor],
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(1, cnt[0])
def test_empty_x_results_in_empty_output(self):
# Empty x is common if someone masks their input with tf.boolean_mask in
# order to drop missing entries, and in a particular batch all entries are
# missing.
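# For example, tf.boolean_mask(inputs, mask) yields a first dimension of 0 when no
# entry of mask is True, which is exactly the shape-(0, 3) input exercised below.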
with self.test_session():
x = np.array([]).reshape(0, 3)
self.assertEqual(0, tf.size(x).eval())
y = tf.contrib.layers.legacy_fully_connected(x,
2,
activation_fn=tf.nn.softmax)
tf.initialize_all_variables().run()
expected_y = np.array([]).reshape(0, 2)
np.testing.assert_array_equal(expected_y, y.eval())
def test_shapes_variable_first_dim(self):
# first dimension is not known statically.
x = tf.placeholder(tf.float32, shape=[None, 4, 3])
y = tf.contrib.layers.legacy_fully_connected(x, 1)
# in the output we still only know the 2nd and 3rd dimensions statically.
self.assertEquals(y.get_shape().as_list(), [None, 4, 1])
with self.test_session() as sess:
tf.initialize_all_variables().run()
# we can feed in input with first dimension 2
shape_value = sess.run(tf.shape(y), feed_dict={x: self.input_3_dim_arr})
self.assertAllClose(shape_value, [2, 4, 1])
# we can feed in input with first dimension 1
shape_value = sess.run(tf.shape(y),
feed_dict={x: [self.input_3_dim_arr[0]]})
self.assertAllClose(shape_value, [1, 4, 1])
# we cannot feed in input with inconsistent dimensions
with self.assertRaises(ValueError):
sess.run(tf.shape(y), feed_dict={x: [[[]]]})
def _unknown_dim_invalid_input(self, last_dim):
x = tf.placeholder(tf.float32, shape=[3, last_dim])
tf.contrib.layers.legacy_fully_connected(x, 2, activation_fn=None)
def test_known_dim_valid_input(self):
self._unknown_dim_invalid_input(last_dim=3)
def test_unknown_dim_invalid_input(self):
with self.assertRaisesRegexp(
ValueError, 'last dimension of x must be known but is None'):
self._unknown_dim_invalid_input(last_dim=None)
def test_1d_invalid_input(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError,
'rank of x must be at least 2 not: 1'):
x = tf.constant([[]], shape=[0])
tf.contrib.layers.legacy_fully_connected(x,
2,
activation_fn=tf.nn.softmax)
if __name__ == '__main__':
tf.test.main()
|
py | b40a89dbeed87e8ea15a217d07a66609db744806 | class group:
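# Minimal credentials holder: stores the given username and password verbatim.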
def __init__(self, username, password ):
self.username = username
self.password = password |
py | b40a8a6bf8c5a811ace55e369292fe9d06541b6f | import os
import torch
import numpy as np
import torch.nn as nn
from torchvision import transforms
import torchvision.datasets as dset
from torch.utils.data.sampler import SubsetRandomSampler
from FFNN import Model_Mnist
# Options for retraining the model
retrain = False
path_model_retrain = "./model_best.pt"
# Set the seed so the model can be reproduced later
seed = 57
torch.manual_seed(seed)
# Check whether we are running on GPU
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.cuda.manual_seed(seed)
kwargs = {'num_workers': 1, 'pin_memory': True}
else:
kwargs = {}
# Directory for the training data; download the MNIST data if it is not already present
root_data = './data'
exist_data = os.path.isdir(root_data)
if not exist_data:
os.mkdir(root_data)
download = True if not exist_data else False
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
train_set = dset.MNIST(root=root_data, train=True, transform=trans, download=download)
test_set = dset.MNIST(root=root_data, train=False, transform=trans)
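# Hold out the first `valid_size` fraction of the training indices for validation;
# each SubsetRandomSampler then draws, in random order, only from the index list it is given.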
valid_size = 0.1
num_train = len(train_set)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# Directory for the saved models
path_model = './models/'
exist_model = os.path.isdir(path_model)
if not exist_model:
os.mkdir(path_model)
# Hyperparameters for training
batch_size = 32
epochs = 10
lr = 0.01
momentum = 0.5
input_size = 28*28
hidden_size = [2500, 2000, 1500, 1000, 50]
output_size = 10
number_hidden_layers = 4
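# The sizes above are simply handed to Model_Mnist (defined in FFNN.py); presumably they
# describe a fully connected stack 784 -> 2500 -> 2000 -> 1500 -> 1000 -> 50 -> 10, but the
# exact wiring depends on that class.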
train_loader = torch.utils.data.DataLoader(dataset=train_set,
batch_size=batch_size,
shuffle=False,
sampler=train_sampler,
**kwargs
)
val_loader = torch.utils.data.DataLoader(dataset=test_set,
batch_size=batch_size,
sampler=valid_sampler,
shuffle=False,
**kwargs
)
print("Total trainning batch number: {}".format(len(train_loader)))
print("Total validating batch number: {}".format(len(val_loader)))
loss_metric = nn.CrossEntropyLoss()
model = Model_Mnist(use_cuda=use_cuda,
loss_metric=loss_metric,
lr=lr,
momentum=momentum,
root_models=path_model,
input_size = input_size,
hidden_size = hidden_size,
output_size = output_size,
number_hidden_layers = number_hidden_layers
)
if not retrain:
model.train(epochs=epochs,
train_loader=train_loader,
val_loader=val_loader)
else:
model.retrain(path_model_retrain)
model.train(epochs=epochs,
train_loader=train_loader,
val_loader=val_loader) |
py | b40a8ad12e9dd7d4799aa449dd8f048c708afe8e | import inspect
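# Small exception-handling demo: the raised ValueError is caught and both its message and
# its class are printed. (The inspect module imported above is not actually used here.)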
try:
raise ValueError("there is something wrong")
except Exception as e:
print(e)
print(e.__class__)
|
py | b40a8b572bbeee3f598bb14bbaa4b28003f61ab3 | import sys
import random
import numpy as np
import data_proc
if len(sys.argv) != 2:
print('Please specify output file name')
exit(0)
try:
with open(sys.argv[1], 'w') as f:
pass
except:
print('Error while opening the output file')
sys.exit(0)
train_x, _, _, _ = data_proc.read_mnist()
# Get all points
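# Each point is a pair of adjacent image columns flattened to 2 * 28 = 56 pixel values;
# the all-zero pair is removed once the set has been built.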
points = set()
for i in range(train_x.shape[0]):
x = train_x[i].reshape((28, 28))
for j in range(1, 28):
points.add(tuple(x.T[j-1:j+1].flat))
points.remove((0, )*56)
points = [np.array(p) for p in points]
print(len(points))
# K-means
# Special case: always make (0, ) * 56 one of the centers
M = data_proc.M
centers = []
members_sum = []
members_num = []
converge = False
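# Plain Lloyd iteration over the M - 1 learned centers; the all-zero vector is appended as the
# M-th center only when results are written out. Convergence is declared once the per-cluster
# membership counts stop changing (note that an empty cluster would make update_center divide by zero).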
def init_centers():
global centers
centers = []
for i in range(M-1):
centers.append(random.choice(points))
def update_member():
global members_sum
global members_num
global converge
last_num = members_num
members_sum = np.full((M-1, 56), 0, dtype=np.int64)
members_num = np.full((M-1), 0, dtype=np.int64)
for p in points:
closest = np.argmin(((p-centers)**2).sum(axis=1))
members_sum[closest] += p
members_num[closest] += 1
if np.array_equal(last_num, members_num):
converge = True
def update_center():
global centers
for i in range(M-1):
centers[i] = members_sum[i] / members_num[i]
init_centers()
for i in range(1000):
update_member()
update_center()
# Print temporary result at each iteration
with open(sys.argv[1], 'w') as f:
print('iteration %d' % (i), file=f)
for c in centers:
print(tuple(c), file=f)
print((0, ) * 56, file=f)
print(list(members_num), file=f)
if converge:
print('converge', file=f)
break
|